diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7e0910c449e9..8c635c516450 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,16 +1,16 @@ # Details: https://help.github.com/en/articles/about-code-owners # Default code owners -* @danieljanes @tanertopal +* @tanertopal @danieljanes # README.md README.md @jafermarq @tanertopal @danieljanes # Flower Baselines -/baselines @jafermarq @danieljanes +/baselines @jafermarq @tanertopal @danieljanes # Flower Benchmarks -/benchmarks @jafermarq @danieljanes +/benchmarks @jafermarq @tanertopal @danieljanes # Flower Datasets /datasets @jafermarq @tanertopal @danieljanes diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index b5c27c9b4834..4528baea9536 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -79,7 +79,7 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 # v5.6.1 with: images: ${{ inputs.namespace-repository }} @@ -93,10 +93,10 @@ jobs: password: ${{ secrets.dockerhub-token }} - name: Build and push - uses: Wandalen/wretry.action@6feedb7dedadeb826de0f45ff482b53b379a7844 # v3.5.0 + uses: Wandalen/wretry.action@ffdd254f4eaf1562b8a2c66aeaa37f1ff2231179 # v3.7.3 id: build with: - action: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + action: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 attempt_limit: 60 # 60 attempts * (9 secs delay + 1 sec retry) = ~10 mins attempt_delay: 9000 # 9 secs with: | @@ -139,7 +139,7 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 # v5.6.1 with: images: ${{ inputs.namespace-repository }} tags: ${{ inputs.tags }} diff --git 
a/.github/workflows/docker-build-main.yml b/.github/workflows/docker-build-main.yml index 81ef845eae29..38a9cd56942b 100644 --- a/.github/workflows/docker-build-main.yml +++ b/.github/workflows/docker-build-main.yml @@ -14,7 +14,7 @@ jobs: outputs: pip-version: ${{ steps.versions.outputs.pip-version }} setuptools-version: ${{ steps.versions.outputs.setuptools-version }} - flwr-version-ref: ${{ steps.versions.outputs.flwr-version-ref }} + matrix: ${{ steps.versions.outputs.matrix }} steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -25,21 +25,26 @@ jobs: run: | echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" - echo "flwr-version-ref=git+${{ github.server_url }}/${{ github.repository }}.git@${{ github.sha }}" >> "$GITHUB_OUTPUT" + FLWR_VERSION_REF="git+${{ github.server_url }}/${{ github.repository }}.git@${{ github.sha }}" + python dev/build-docker-image-matrix.py --flwr-version "${FLWR_VERSION_REF}" --matrix unstable > matrix.json + echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT build-docker-base-images: name: Build base images if: github.repository == 'adap/flower' uses: ./.github/workflows/_docker-build.yml needs: parameters + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.parameters.outputs.matrix).base }} with: - namespace-repository: flwr/base - file-dir: src/docker/base/ubuntu + namespace-repository: ${{ matrix.images.namespace_repository }} + file-dir: ${{ matrix.images.file_dir }} build-args: | PIP_VERSION=${{ needs.parameters.outputs.pip-version }} SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} - FLWR_VERSION_REF=${{ needs.parameters.outputs.flwr-version-ref }} - tags: unstable + ${{ matrix.images.build_args_encoded }} + tags: ${{ matrix.images.tags_encoded }} secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ 
secrets.DOCKERHUB_TOKEN }} @@ -48,22 +53,15 @@ jobs: name: Build binary images if: github.repository == 'adap/flower' uses: ./.github/workflows/_docker-build.yml - needs: build-docker-base-images + needs: [parameters, build-docker-base-images] strategy: fail-fast: false - matrix: - images: [ - { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, - { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, - { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" }, - { repository: "flwr/superexec", file_dir: "src/docker/superexec" }, - { repository: "flwr/clientapp", file_dir: "src/docker/clientapp" } - ] + matrix: ${{ fromJson(needs.parameters.outputs.matrix).binary }} with: - namespace-repository: ${{ matrix.images.repository }} + namespace-repository: ${{ matrix.images.namespace_repository }} file-dir: ${{ matrix.images.file_dir }} - build-args: BASE_IMAGE=unstable - tags: unstable + build-args: BASE_IMAGE=${{ matrix.images.base_image }} + tags: ${{ matrix.images.tags_encoded }} secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-readme.yml b/.github/workflows/docker-readme.yml index 29dd787d638e..9e156e835056 100644 --- a/.github/workflows/docker-readme.yml +++ b/.github/workflows/docker-readme.yml @@ -24,7 +24,7 @@ jobs: list-files: "json" filters: | readme: - - 'src/docker/**/README.md' + - added|modified: 'src/docker/**/README.md' update: if: ${{ needs.collect.outputs.readme_files != '' && toJson(fromJson(needs.collect.outputs.readme_files)) != '[]' }} diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 012f584561ac..0f462d9a49da 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -67,7 +67,7 @@ jobs: - connection: insecure authentication: client-auth name: | - SuperExec / + Exec API / Python ${{ matrix.python-version }} / ${{ matrix.connection }} / ${{ matrix.authentication }} / @@ -102,12 
+102,12 @@ jobs: python -m pip install "${WHEEL_URL}" fi - name: > - Run SuperExec test / + Run Exec API test / ${{ matrix.connection }} / ${{ matrix.authentication }} / ${{ matrix.engine }} working-directory: e2e/${{ matrix.directory }} - run: ./../test_superexec.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" + run: ./../test_exec_api.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" frameworks: runs-on: ubuntu-22.04 @@ -347,3 +347,59 @@ jobs: cd tmp-${{ matrix.framework }} flwr build flwr install *.fab + + numpy: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + fail-fast: false + matrix: + numpy-version: ["1.26"] + python-version: ["3.11"] + directory: [e2e-bare-auth] + connection: [insecure] + engine: [deployment-engine, simulation-engine] + authentication: [no-auth] + name: | + NumPy ${{ matrix.numpy-version }} / + Python ${{ matrix.python-version }} / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + defaults: + run: + working-directory: e2e/${{ matrix.directory }} + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: ${{ matrix.python-version }} + poetry-skip: 'true' + - name: Install Flower from repo + if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor == 'dependabot[bot]' }} + working-directory: ./ + run: | + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; then + python -m pip install ".[simulation]" "numpy>=${{ matrix.numpy-version }},<2.0" + else + python -m pip install . 
"numpy>=${{ matrix.numpy-version }},<2.0" + fi + - name: Download and install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + # Define base URL for wheel file + WHEEL_URL="https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }}" + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; then + python -m pip install "flwr[simulation] @ ${WHEEL_URL}" "numpy>=${{ matrix.numpy-version }},<2.0" + else + python -m pip install "${WHEEL_URL}" "numpy>=${{ matrix.numpy-version }},<2.0" + fi + - name: > + Run Flower - NumPy 1.26 test / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + working-directory: e2e/${{ matrix.directory }} + run: ./../test_exec_api.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index e608329872de..6af0c281882b 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -71,7 +71,7 @@ jobs: - id: matrix run: | - python dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" > matrix.json + python dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" --matrix stable > matrix.json echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT build-base-images: @@ -86,13 +86,10 @@ jobs: namespace-repository: ${{ matrix.images.namespace_repository }} file-dir: ${{ matrix.images.file_dir }} build-args: | - PYTHON_VERSION=${{ matrix.images.python_version }} PIP_VERSION=${{ needs.parameters.outputs.pip-version }} SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} - DISTRO=${{ matrix.images.distro.name }} - DISTRO_VERSION=${{ matrix.images.distro.version }} - 
FLWR_VERSION=${{ matrix.images.flwr_version }} - tags: ${{ matrix.images.tag }} + ${{ matrix.images.build_args_encoded }} + tags: ${{ matrix.images.tags_encoded }} secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} @@ -109,7 +106,7 @@ jobs: namespace-repository: ${{ matrix.images.namespace_repository }} file-dir: ${{ matrix.images.file_dir }} build-args: BASE_IMAGE=${{ matrix.images.base_image }} - tags: ${{ matrix.images.tags }} + tags: ${{ matrix.images.tags_encoded }} secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/release-nightly.yml b/.github/workflows/release-nightly.yml index fcefff300cb7..d1de7bed531e 100644 --- a/.github/workflows/release-nightly.yml +++ b/.github/workflows/release-nightly.yml @@ -13,11 +13,10 @@ jobs: name: Relase nightly on PyPI if: github.repository == 'adap/flower' outputs: - name: ${{ steps.release.outputs.name }} - version: ${{ steps.release.outputs.version }} skip: ${{ steps.release.outputs.skip }} pip-version: ${{ steps.release.outputs.pip-version }} setuptools-version: ${{ steps.release.outputs.setuptools-version }} + matrix: ${{ steps.release.outputs.matrix }} steps: - uses: actions/checkout@v4 - name: Bootstrap @@ -33,27 +32,30 @@ jobs: echo "skip=true" >> $GITHUB_OUTPUT fi - echo "name=$(poetry version | awk {'print $1'})" >> $GITHUB_OUTPUT - echo "version=$(poetry version -s)" >> $GITHUB_OUTPUT echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" + NAME=$(poetry version | awk {'print $1'}) + VERSION=$(poetry version -s) + python dev/build-docker-image-matrix.py --flwr-version "${VERSION}" --matrix nightly --flwr-package "${NAME}" > matrix.json + echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT + build-docker-base-images: name: Build nightly base images if: 
github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' uses: ./.github/workflows/_docker-build.yml needs: release-nightly + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.release-nightly.outputs.matrix).base }} with: - namespace-repository: flwr/base - file-dir: src/docker/base/ubuntu + namespace-repository: ${{ matrix.images.namespace_repository }} + file-dir: ${{ matrix.images.file_dir }} build-args: | PIP_VERSION=${{ needs.release-nightly.outputs.pip-version }} SETUPTOOLS_VERSION=${{ needs.release-nightly.outputs.setuptools-version }} - FLWR_VERSION=${{ needs.release-nightly.outputs.version }} - FLWR_PACKAGE=${{ needs.release-nightly.outputs.name }} - tags: | - ${{ needs.release-nightly.outputs.version }} - nightly + ${{ matrix.images.build_args_encoded }} + tags: ${{ matrix.images.tags_encoded }} secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} @@ -65,21 +67,12 @@ jobs: needs: [release-nightly, build-docker-base-images] strategy: fail-fast: false - matrix: - images: [ - { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, - { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, - { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" }, - { repository: "flwr/superexec", file_dir: "src/docker/superexec" }, - { repository: "flwr/clientapp", file_dir: "src/docker/clientapp" } - ] + matrix: ${{ fromJson(needs.release-nightly.outputs.matrix).binary }} with: - namespace-repository: ${{ matrix.images.repository }} + namespace-repository: ${{ matrix.images.namespace_repository }} file-dir: ${{ matrix.images.file_dir }} - build-args: BASE_IMAGE=${{ needs.release-nightly.outputs.version }} - tags: | - ${{ needs.release-nightly.outputs.version }} - nightly + build-args: BASE_IMAGE=${{ matrix.images.base_image }} + tags: ${{ matrix.images.tags_encoded }} secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ 
secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/update_translations.yml b/.github/workflows/update_translations.yml index 9419f4aaef25..9a5391a40438 100644 --- a/.github/workflows/update_translations.yml +++ b/.github/workflows/update_translations.yml @@ -12,11 +12,17 @@ jobs: contents: write pull-requests: write env: - branch-name: auto-update-trans-text + base-branch: main # The base branch for the PR name: Update text steps: - uses: actions/checkout@v4 + - name: Generate unique branch name + id: generate_branch + run: | + export BRANCH_NAME="auto-update-trans-text-$(date +'%Y%m%d-%H%M%S')" + echo "branch-name=$BRANCH_NAME" >> $GITHUB_ENV + - name: Bootstrap uses: ./.github/actions/bootstrap with: @@ -65,14 +71,15 @@ jobs: uses: ad-m/github-push-action@master with: github_token: ${{ secrets.GITHUB_TOKEN }} - branch: '${{ env.branch-name }}' + branch: ${{ env.branch-name }} - name: Create Pull Request if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 uses: peter-evans/create-pull-request@v6 with: token: ${{ secrets.GITHUB_TOKEN }} - branch: '${{ env.branch-name }}' + branch: ${{ env.branch-name }} + base: ${{ env.base-branch }} delete-branch: true title: 'docs(framework:skip) Update source texts for translations (automated)' body: 'This PR is auto-generated to update text and language files.' diff --git a/.gitignore b/.gitignore index b0962c2783f0..96789cbf6e00 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,9 @@ examples/**/dataset/** # Flower Baselines baselines/datasets/leaf +# Exclude ee package +src/py/flwr/ee + # macOS .DS_Store @@ -183,3 +186,6 @@ app/src/main/assets /captures .externalNativeBuild .cxx + +# Pyright +pyrightconfig.json diff --git a/README.md b/README.md index 7aa73fe609bb..b5c58c6838f0 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Flower: A Friendly Federated Learning Framework +# Flower: A Friendly Federated AI Framework

@@ -21,7 +21,7 @@ [![Docker Hub](https://img.shields.io/badge/Docker%20Hub-flwr-blue)](https://hub.docker.com/u/flwr) [![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.ai/join-slack) -Flower (`flwr`) is a framework for building federated learning systems. The +Flower (`flwr`) is a framework for building federated AI systems. The design of Flower is based on a few guiding principles: - **Customizable**: Federated learning systems vary wildly from one use case to diff --git a/baselines/doc/source/conf.py b/baselines/doc/source/conf.py index a2667dbcf006..9d5d4ea7fc92 100644 --- a/baselines/doc/source/conf.py +++ b/baselines/doc/source/conf.py @@ -37,7 +37,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.13.0" +release = "1.14.0" # -- General configuration --------------------------------------------------- diff --git a/baselines/doc/source/how-to-contribute-baselines.rst b/baselines/doc/source/how-to-contribute-baselines.rst index 429ac714c1aa..5838f489d313 100644 --- a/baselines/doc/source/how-to-contribute-baselines.rst +++ b/baselines/doc/source/how-to-contribute-baselines.rst @@ -65,7 +65,7 @@ Flower is known and loved for its usability. Therefore, make sure that your base flwr run . # Run the baseline overriding the config - flwr run . --run-config lr=0.01,num-server-rounds=200 + flwr run . --run-config "lr=0.01 num-server-rounds=200" We look forward to your contribution! \ No newline at end of file diff --git a/baselines/doc/source/index.rst b/baselines/doc/source/index.rst index 3a19e74b891e..2ca39776dc8e 100644 --- a/baselines/doc/source/index.rst +++ b/baselines/doc/source/index.rst @@ -1,7 +1,7 @@ Flower Baselines Documentation ============================== -Welcome to Flower Baselines' documentation. `Flower `_ is a friendly federated learning framework. +Welcome to Flower Baselines' documentation. `Flower `_ is a friendly federated AI framework. 
Join the Flower Community diff --git a/baselines/feddebug/.gitignore b/baselines/feddebug/.gitignore new file mode 100644 index 000000000000..bd99fc54b9d5 --- /dev/null +++ b/baselines/feddebug/.gitignore @@ -0,0 +1 @@ +outputs/ \ No newline at end of file diff --git a/baselines/feddebug/LICENSE b/baselines/feddebug/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/baselines/feddebug/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/baselines/feddebug/README.md b/baselines/feddebug/README.md new file mode 100644 index 000000000000..ca93fd4b4426 --- /dev/null +++ b/baselines/feddebug/README.md @@ -0,0 +1,233 @@ +--- +title: FedDebug Systematic Debugging for Federated Learning Applications +url: https://dl.acm.org/doi/abs/10.1109/ICSE48619.2023.00053 +labels: [malicious client, debugging, fault localization, image classification, data poisoning] +dataset: [cifar10, mnist] +--- + +# FedDebug: Systematic Debugging for Federated Learning Applications + +> [!NOTE] +> If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. + +**Paper:** [dl.acm.org/doi/abs/10.1109/ICSE48619.2023.00053](https://dl.acm.org/doi/abs/10.1109/ICSE48619.2023.00053) + +**Authors:** [Waris Gill](https://people.cs.vt.edu/waris/) (Virginia Tech, USA), [Ali Anwar](https://cse.umn.edu/cs/ali-anwar) (University of Minnesota Twin Cities, USA), [Muhammad Ali Gulzar](https://people.cs.vt.edu/~gulzar/) (Virginia Tech, USA) + +**Abstract:** In Federated Learning (FL), clients independently train local models and share them with a central aggregator to build a global model. Impermissibility to access clients' data and collaborative training make FL appealing for applications with data-privacy concerns, such as medical imaging. 
However, these FL characteristics pose unprecedented challenges for debugging. When a global model's performance deteriorates, identifying the responsible rounds and clients is a major pain point. Developers resort to trial-and-error debugging with subsets of clients, hoping to increase the global model's accuracy or let future FL rounds retune the model, which are time-consuming and costly. +We design a systematic fault localization framework, FedDebug, that advances the FL debugging on two novel fronts. First, FedDebug enables interactive debugging of realtime collaborative training in FL by leveraging record and replay techniques to construct a simulation that mirrors live FL. FedDebug's _breakpoint_ can help inspect an FL state (round, client, and global model) and move between rounds and clients' models seamlessly, enabling a fine-grained step-by-step inspection. Second, FedDebug automatically identifies the client(s) responsible for lowering the global model's performance without any testing data and labels---both are essential for existing debugging techniques. FedDebug's strengths come from adapting differential testing in conjunction with neuron activations to determine the client(s) deviating from normal behavior. FedDebug achieves 100% accuracy in finding a single faulty client and 90.3% accuracy in finding multiple faulty clients. FedDebug's interactive debugging incurs 1.2% overhead during training, while it localizes a faulty client in only 2.1% of a round's training time. With FedDebug, we bring effective debugging practices to federated learning, improving the quality and productivity of FL application developers. + + + + + +Malicious Client Localization + + +## About this baseline + +**What's implemented:** +FedDebug is a systematic malicious client(s) localization framework designed to advance debugging in Federated Learning (FL). 
It enables interactive debugging of real-time collaborative training and automatically identifies clients responsible for lowering global model performance without requiring testing data or labels. + +This repository implements the FedDebug technique of localizing malicious client(s) in a generic way, allowing it to be used with various fusion techniques (FedAvg, FedProx) and CNN architectures. You can find the original code of FedDebug [here](https://github.com/SEED-VT/FedDebug). + + +**Flower Datasets:** This baseline integrates `flwr-datasets` and is tested on CIFAR-10 and MNIST datasets. The code is designed to work with other datasets as well. You can easily extend the code to work with other datasets by following the Flower dataset guidelines. + + +**Hardware Setup:** +These experiments were run on a machine with 8 CPU cores and an Nvidia Tesla P40 GPU. +> [!NOTE] +> This baseline also contains a smaller CNN model (LeNet) to run all these experiments on a CPU. Furthermore, the experiments are also scaled down to obtain representative results of the FedDebug evaluations. + +**Contributors:** Waris Gill ([GitHub Profile](https://github.com/warisgill)) + +## Experimental Setup + +**Task:** Image classification, Malicious/Faulty Client(s) Removal, Debugging and Testing + +**Models:** This baseline implements two CNN architectures: LeNet and ResNet. Other CNN models (DenseNet, VGG, etc.) are also supported. Check the `conf/base.yaml` file for more details. + +**Dataset:** The datasets are partitioned among clients, and each client participates in the training (cross-silo). However, you can easily extend the code to work in cross-device settings. This baseline uses Dirichlet partitioning to partition the datasets among clients for Non-IID experiments. However, the original paper uses a quantity-based imbalance approach ([niid_bench](https://arxiv.org/abs/2102.02079)). 
+ +| Dataset | #classes | #clients | partitioning method | +| :------- | :------: | :------: | :-----------------: | +| CIFAR-10 | 10 | 10 | IID and Non-IID | +| MNIST | 10 | 10 | IID and Non-IID | + +**FL Training Hyperparameters and FedDebug Configuration:** +Default training hyperparameters are in `conf/base.yaml`. + +## Environment Setup + +Experiments are conducted with `Python 3.10.14`. It is recommended to use Python 3.10 for the experiments. +Check the documentation for the different ways of installing `pyenv`, but one easy way is using the [automatic installer](https://github.com/pyenv/pyenv-installer): + +```bash +curl https://pyenv.run | bash # then, don't forget links to your .bashrc/.zshrc +``` + +You can then install any Python version with `pyenv install 3.10.14` Then, in order to use FedDebug baseline, you'd do the following: + +```bash +# cd to your feddebug directory (i.e. where the `pyproject.toml` is) +pyenv local 3.10.14 +poetry env use 3.10.14 # set that version for poetry + +# run this from the same directory as the `pyproject.toml` file is +poetry install +poetry shell + +# check the python version by running the following command +python --version # it should be >=3.10.14 +``` + +This will create a basic Python environment with just Flower and additional packages, including those needed for simulation. Now you are inside your environment (pretty much as when you use `virtualenv` or `conda`). + +## Running the Experiments + +> [!NOTE] +> You can run almost any evaluation from the paper by changing the parameters in `conf/base.yaml`. Also, you can change the resources (per client CPU and GPU) in `conf/base.yaml` to speed up the simulation. Please check the Flower simulation guide for more details ([Flower Framework main](https://flower.ai/docs/framework/how-to-run-simulations.html)). + +The following command will run the default experimental setting in `conf/base.yaml` (LeNet, MNIST, with a total of 10 clients, where client-0 is malicious). 
FedDebug will identify client-0 as the malicious client. **The experiment took on average 60 seconds to complete.** + +```bash +python -m feddebug.main device=cpu +``` + +Output of the last round will show the FedDebug output as follows: + +```log +... +[2024-10-24 12:25:48,758][flwr][INFO] - ***FedDebug Output Round 5 *** +[2024-10-24 12:25:48,758][flwr][INFO] - True Malicious Clients (Ground Truth) = ['0'] +[2024-10-24 12:25:48,758][flwr][INFO] - Total Random Inputs = 10 +[2024-10-24 12:25:48,758][flwr][INFO] - Predicted Malicious Clients = {'0': 1.0} +[2024-10-24 12:25:48,758][flwr][INFO] - FedDebug Localization Accuracy = 100.0 +[2024-10-24 12:25:49,577][flwr][INFO] - fit progress: (5, 0.00015518503449857236, {'accuracy': 0.978, 'loss': 0.00015518503449857236, 'round': 5}, 39.02993568999227) +[2024-10-24 12:25:49,577][flwr][INFO] - configure_evaluate: no clients selected, skipping evaluation +[2024-10-24 12:25:49,577][flwr][INFO] - +[2024-10-24 12:25:49,577][flwr][INFO] - [SUMMARY] +... + +``` +It predicts the malicious client(s) with 100% accuracy. `Predicted Malicious Clients = {'0': 1.0}` means that client-0 is predicted as the malicious client with 1.0 probability. It will also generate a graph `iid-lenet-mnist.png` as shown below: + +FedDebug Malicious Client Localization IID-LeNet-MNIST + + + +## FedDebug Diverse Experiment Scenarios +Next, we demonstrate FedDebug experiments across key scenarios: detecting multiple malicious clients (Section 5-B), running with various models, datasets, and devices (including GPU), and examining how neuron activation thresholds impact localization accuracy (Section 5-C). Understanding these scenarios will help you adapt FedDebug to your specific needs and evaluate any configuration you wish to explore from the paper. + + +### 1. Multiple Malicious Clients +To test the localization of multiple malicious clients, you can change the `total_malicious_clients`. Total Time Taken: 46.7 seconds. 
+ +```bash +python -m feddebug.main device=cpu total_malicious_clients=2 dataset.name=cifar10 +``` +In this scenario, clients 0 and 1 are now malicious. The output will show the FedDebug output as follows: + +```log +... +[2024-10-24 12:28:14,125][flwr][INFO] - ***FedDebug Output Round 5 *** +[2024-10-24 12:28:14,125][flwr][INFO] - True Malicious Clients (Ground Truth) = ['0', '1'] +[2024-10-24 12:28:14,125][flwr][INFO] - Total Random Inputs = 10 +[2024-10-24 12:28:14,125][flwr][INFO] - Predicted Malicious Clients = {'0': 1.0, '1': 1.0} +[2024-10-24 12:28:14,125][flwr][INFO] - FedDebug Localization Accuracy = 100.0 +[2024-10-24 12:28:15,148][flwr][INFO] - fit progress: (5, 0.003398957598209381, {'accuracy': 0.4151, 'loss': 0.003398957598209381, 'round': 5}, 35.81892481799878) +[2024-10-24 12:28:15,148][flwr][INFO] - configure_evaluate: no clients selected, skipping evaluation +[2024-10-24 12:28:15,148][flwr][INFO] - +[2024-10-24 12:28:15,148][flwr][INFO] - [SUMMARY] +... +``` +FedDebug predicts the malicious clients with 100% accuracy. `Predicted Malicious Clients = {'0': 1.0, '1': 1.0}` means that clients 0 and 1 are predicted as the malicious clients with 1.0 probability. It will also generate a graph `iid-lenet-cifar10.png` as shown below: + + +FedDebug Malicious Client Localization IID-LeNet-CIFAR10 + + +### 2. Changing the Model and Device +To run the experiments with ResNet and `Cuda` with Non-IID distribution you can run the following command. Total Time Taken: 84 seconds. + +> [!NOTE] +> You can run FedDebug with any *model* list in the `conf/base.yaml` file at line 24. Furthermore, you can quickly add additional models in `feddebug/models.py` at line 47. + + +```bash +python -m feddebug.main device=cuda model=resnet18 distribution=non_iid + +``` +Output +```log +... 
+[2024-10-24 12:13:40,679][flwr][INFO] - ***FedDebug Output Round 5 *** +[2024-10-24 12:13:40,679][flwr][INFO] - True Malicious Clients (Ground Truth) = ['0'] +[2024-10-24 12:13:40,679][flwr][INFO] - Total Random Inputs = 10 +[2024-10-24 12:13:40,679][flwr][INFO] - Predicted Malicious Clients = {'0': 1.0} +[2024-10-24 12:13:40,679][flwr][INFO] - FedDebug Localization Accuracy = 100.0 +[2024-10-24 12:13:41,595][flwr][INFO] - fit progress: (5, 0.000987090128660202, {'accuracy': 0.8528, 'loss': 0.000987090128660202, 'round': 5}, 75.3773579710105) +[2024-10-24 12:13:41,595][flwr][INFO] - configure_evaluate: no clients selected, skipping evaluation +[2024-10-24 12:13:41,602][flwr][INFO] - +[2024-10-24 12:13:41,602][flwr][INFO] - [SUMMARY] +``` +Following is the graph `non_iid-resnet18-mnist.png` generated by the code: + + +FedDebug Malicious Client Localization Non-IID-ResNet18-MNIST + + +### 3. Threshold Impact on Localization +You can also test the impact of the neuron activation threshold on localization accuracy. A higher threshold decreases the localization accuracy. Total Time Taken: 84 seconds. + +```bash +python -m feddebug.main device=cuda model=resnet18 feddebug.na_t=0.7 +``` + +```log +... 
+[2024-10-24 12:21:26,923][flwr][INFO] - ***FedDebug Output Round 2 *** +[2024-10-24 12:21:26,923][flwr][INFO] - True Malicious Clients (Ground Truth) = ['0'] +[2024-10-24 12:21:26,923][flwr][INFO] - Total Random Inputs = 10 +[2024-10-24 12:21:26,923][flwr][INFO] - Predicted Malicious Clients = {'5': 0.7, '0': 0.3} +[2024-10-24 12:21:26,923][flwr][INFO] - FedDebug Localization Accuracy = 30.0 +[2024-10-24 12:21:27,773][flwr][INFO] - fit progress: (2, 0.001345307207107544, {'accuracy': 0.9497, 'loss': 0.001345307207107544, 'round': 2}, 31.669926984992344) +[2024-10-24 12:21:27,773][flwr][INFO] - configure_evaluate: no clients selected, skipping evaluation +[2024-10-24 12:21:27,773][flwr][INFO] - +``` + +Following is the graph `iid-resnet18-mnist.png` generated by the code: + +FedDebug Malicious Client Localization IID-ResNet18-MNIST + + +> [!WARNING] +> FedDebug generates random inputs to localize malicious client(s). Thus, results might vary slightly on each run due to randomness. + + + +## Limitations and Discussion +Compared to the current baseline, FedDebug was originally evaluated using only a single round of training. It was not initially tested with Dirichlet partitioning for data distribution, which means it may deliver suboptimal performance under different data distribution settings. Enhancing FedDebug's performance could be achieved by generating more effective random inputs, for example, through the use of Generative Adversarial Networks (GANs). + + +## Application of FedDebug +We used FedDebug to detect `backdoor attacks` in Federated Learning, resulting in [FedDefender](https://dl.acm.org/doi/10.1145/3617574.3617858). The code is implemented using the Flower Framework in [this repository](https://github.com/warisgill/FedDefender). We plan to adapt FedDefender to Flower baseline guidelines soon. + +## Citation +If you have any questions or feedback, feel free to contact me at `waris@vt.edu`. 
Please cite FedDebug as follows: + +```bibtex +@inproceedings{gill2023feddebug, + title={{Feddebug: Systematic Debugging for Federated Learning Applications}}, + author={Gill, Waris and Anwar, Ali and Gulzar, Muhammad Ali}, + booktitle={2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE)}, + pages={512--523}, + year={2023}, + organization={IEEE} +} +``` + + + + diff --git a/baselines/feddebug/_static/feddbug-approach.png b/baselines/feddebug/_static/feddbug-approach.png new file mode 100644 index 000000000000..046e0ec9ceb9 Binary files /dev/null and b/baselines/feddebug/_static/feddbug-approach.png differ diff --git a/baselines/feddebug/_static/iid-lenet-cifar10.png b/baselines/feddebug/_static/iid-lenet-cifar10.png new file mode 100644 index 000000000000..a5e0aa87a3c6 Binary files /dev/null and b/baselines/feddebug/_static/iid-lenet-cifar10.png differ diff --git a/baselines/feddebug/_static/iid-lenet-mnist.png b/baselines/feddebug/_static/iid-lenet-mnist.png new file mode 100644 index 000000000000..4414c658a3c2 Binary files /dev/null and b/baselines/feddebug/_static/iid-lenet-mnist.png differ diff --git a/baselines/feddebug/_static/iid-resnet18-mnist.png b/baselines/feddebug/_static/iid-resnet18-mnist.png new file mode 100644 index 000000000000..0538ce6413a2 Binary files /dev/null and b/baselines/feddebug/_static/iid-resnet18-mnist.png differ diff --git a/baselines/feddebug/_static/non_iid-resnet18-mnist.png b/baselines/feddebug/_static/non_iid-resnet18-mnist.png new file mode 100644 index 000000000000..bda745c75942 Binary files /dev/null and b/baselines/feddebug/_static/non_iid-resnet18-mnist.png differ diff --git a/baselines/feddebug/feddebug/__init__.py b/baselines/feddebug/feddebug/__init__.py new file mode 100644 index 000000000000..a5e567b59135 --- /dev/null +++ b/baselines/feddebug/feddebug/__init__.py @@ -0,0 +1 @@ +"""Template baseline package.""" diff --git a/baselines/feddebug/feddebug/client.py 
b/baselines/feddebug/feddebug/client.py new file mode 100644 index 000000000000..3b3138e63156 --- /dev/null +++ b/baselines/feddebug/feddebug/client.py @@ -0,0 +1,45 @@ +"""Define your client class and a function to construct such clients. + +Please overwrite `flwr.client.NumPyClient` or `flwr.client.Client` and create a function +to instantiate your client. +""" + +from logging import INFO + +import flwr as fl +from flwr.common.logger import log + +from feddebug.models import train_neural_network +from feddebug.utils import get_parameters, set_parameters + + +class CNNFlowerClient(fl.client.NumPyClient): + """Flower client for training a CNN model.""" + + def __init__(self, args): + """Initialize the client with the given configuration.""" + self.args = args + + def fit(self, parameters, config): + """Train the model on the local dataset.""" + nk_client_data_points = len(self.args["client_data_train"]) + model = self.args["model"] + + set_parameters(model, parameters=parameters) + train_dict = train_neural_network( + { + "lr": config["lr"], + "epochs": config["local_epochs"], + "batch_size": config["batch_size"], + "model": model, + "train_data": self.args["client_data_train"], + "device": self.args["device"], + } + ) + + parameters = get_parameters(model) + + client_train_dict = {"cid": self.args["cid"]} | train_dict + + log(INFO, "Client %s trained.", self.args["cid"]) + return parameters, nk_client_data_points, client_train_dict diff --git a/baselines/feddebug/feddebug/conf/base.yaml b/baselines/feddebug/feddebug/conf/base.yaml new file mode 100644 index 000000000000..4df403ec3e87 --- /dev/null +++ b/baselines/feddebug/feddebug/conf/base.yaml @@ -0,0 +1,61 @@ +# General Configuration +num_rounds: 5 +num_clients: 10 +clients_per_round: ${num_clients} + +# Client Configuration +client: + epochs: 3 + lr: 0.001 + batch_size: 256 + +# Adversarial Settings +noise_rate: 1 +malicious_clients_ids: [0] # Malicious (also called faulty) client IDs (Ground Truth). 
Default client 0 is malicious. +total_malicious_clients: null # For inducing multiple malicious clients in Table-2. e.g., 2 means clients [0, 1] are malicious + +# FedDebug Configuration +feddebug: + fast: true # to generate random inputs faster + r_inputs: 10 # number of random inputs to generate + na_t: 0.00 # neuron activation threshold + +# Model Configuration +model: lenet # Options: lenet, resnet18, resnet34, resnet50, resnet101, resnet152, densenet121, vgg16 + +# Dataset Configuration +distribution: 'iid' # Use `iid` for the IID data distribution or `non_iid` for the non-IID data distribution. +dataset_channels: + cifar10: 3 # RGB + mnist: 1 + +dataset_classes: + cifar10: 10 + mnist: 10 + +dataset: + name: mnist + num_classes: ${dataset_classes.${dataset.name}} + channels: ${dataset_channels.${dataset.name}} + + +# Device and Resource Configuration +device: cpu +total_gpus: 1 +total_cpus: 10 + +client_resources: + num_cpus: 2 + num_gpus: 0.2 # Note that `num_gpus` is only used when the device is set to `cuda` (i.e., `device = cuda`) + + +# Logging Configuration (Hydra) +hydra: + job_logging: + root: + level: INFO # Set the job logging level to INFO + loggers: + flwr: + level: INFO + accelerate.utils.other: + level: ERROR \ No newline at end of file diff --git a/baselines/feddebug/feddebug/dataset.py b/baselines/feddebug/feddebug/dataset.py new file mode 100644 index 000000000000..7964ad4083a4 --- /dev/null +++ b/baselines/feddebug/feddebug/dataset.py @@ -0,0 +1,165 @@ +"""Handle basic dataset creation. + +In case of PyTorch it should return dataloaders for your dataset (for both the clients +and the server). If you are using a custom dataset class, this module is the place to +define it. If your dataset requires to be downloaded (and this is not done +automatically -- e.g. as it is the case for many dataset in TorchVision) and +partitioned, please include all those functions and logic in the +`dataset_preparation.py` module. 
You can use all those functions from functions/methods +defined here of course. +""" + +import random +from logging import INFO + +import numpy as np +from flwr.common.logger import log +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import DirichletPartitioner, IidPartitioner +from torch.utils.data import DataLoader +from torchvision import transforms + +from feddebug.utils import create_transform + + +def _add_noise_in_data(my_dataset, noise_rate, num_classes): + """Introduce label noise by flipping labels based on the noise rate.""" + + def flip_labels(batch): + labels = np.array(batch["label"]) + flip_mask = np.random.rand(len(labels)) < noise_rate + indices_to_flip = np.where(flip_mask)[0] + if len(indices_to_flip) > 0: + new_labels = labels[indices_to_flip].copy() + for idx in indices_to_flip: + current_label = new_labels[idx] + possible_labels = list(range(num_classes)) + possible_labels.remove(current_label) + new_labels[idx] = random.choice(possible_labels) + + labels[indices_to_flip] = new_labels + batch["label"] = labels + return batch + + noisy_dataset = my_dataset.map( + flip_labels, batched=True, batch_size=256, num_proc=8 + ).with_format("torch") + return noisy_dataset + + +def _create_dataloader(my_dataset, batch_size=64, shuffle=True): + """Create a DataLoader with applied transformations.""" + col = "image" if "image" in my_dataset.column_names else "img" + + def apply_transforms(batch): + batch["image"] = [transform(img) for img in batch[col]] + if col != "image": + del batch[col] + return batch + + temp_transform = create_transform() + + transform = transforms.Compose( + [ + transforms.Resize((32, 32)), + transforms.ToTensor(), + temp_transform, + ] + ) + transformed_dataset = my_dataset.with_transform(apply_transforms) + dataloader = DataLoader(transformed_dataset, batch_size=batch_size, shuffle=shuffle) + return dataloader + + +class ClientsAndServerDatasets: + """Prepare the clients and server datasets for 
federated learning.""" + + def __init__(self, cfg): + self.cfg = cfg + self.client_id_to_loader = {} + self.server_testloader = None + self.clients_and_server_raw_data = None + + self._set_distribution_partitioner() + self._load_datasets() + self._introduce_label_noise() + + def _set_distribution_partitioner(self): + """Set the data distribution partitioner based on configuration.""" + if self.cfg.distribution == "iid": + self.data_dist_partitioner_func = self._iid_data_distribution + elif self.cfg.distribution == "non_iid": + self.data_dist_partitioner_func = self._dirichlet_data_distribution + else: + raise ValueError(f"Unknown distribution type: {self.cfg.distribution}") + + def _dirichlet_data_distribution(self, target_label_col: str = "label"): + """Partition data using Dirichlet distribution.""" + partitioner = DirichletPartitioner( + num_partitions=self.cfg.num_clients, + partition_by=target_label_col, + alpha=2, + min_partition_size=0, + self_balancing=True, + ) + return self._partition_helper(partitioner) + + def _iid_data_distribution(self): + """Partition data using IID distribution.""" + partitioner = IidPartitioner(num_partitions=self.cfg.num_clients) + return self._partition_helper(partitioner) + + def _partition_helper(self, partitioner): + fds = FederatedDataset( + dataset=self.cfg.dataset.name, partitioners={"train": partitioner} + ) + server_data = fds.load_split("test") + client2data = { + f"{cid}": fds.load_partition(cid) for cid in range(self.cfg.num_clients) + } + return {"client2data": client2data, "server_data": server_data} + + def _load_datasets(self): + """Load and partition the datasets based on the partitioner.""" + self.clients_and_server_raw_data = self.data_dist_partitioner_func() + self._create_client_dataloaders() + self.server_testloader = _create_dataloader( + self.clients_and_server_raw_data["server_data"], + batch_size=512, + shuffle=False, + ) + + def _create_client_dataloaders(self): + """Create DataLoaders for each 
client.""" + self.client_id_to_loader = { + client_id: _create_dataloader( + client_data, batch_size=self.cfg.client.batch_size + ) + for client_id, client_data in self.clients_and_server_raw_data[ + "client2data" + ].items() + if client_id not in self.cfg.malicious_clients_ids + } + + def _introduce_label_noise(self): + """Introduce label noise to specified faulty clients.""" + faulty_client_ids = self.cfg.malicious_clients_ids + noise_rate = self.cfg.noise_rate + num_classes = self.cfg.dataset.num_classes + client2data = self.clients_and_server_raw_data["client2data"] + + for client_id in faulty_client_ids: + client_ds = client2data[client_id] + noisy_dataset = _add_noise_in_data(client_ds, noise_rate, num_classes) + self.client_id_to_loader[client_id] = _create_dataloader( + noisy_dataset, batch_size=self.cfg.client.batch_size + ) + + log(INFO, "** All Malicious Clients are: %s **", faulty_client_ids) + + def get_data(self): + """Get the prepared client and server DataLoaders.""" + return { + "server_testdata": self.server_testloader, + "client2data": self.client_id_to_loader, + } diff --git a/baselines/feddebug/feddebug/dataset_preparation.py b/baselines/feddebug/feddebug/dataset_preparation.py new file mode 100644 index 000000000000..0120ab9dfb0f --- /dev/null +++ b/baselines/feddebug/feddebug/dataset_preparation.py @@ -0,0 +1,8 @@ +"""Handle the dataset partitioning and (optionally) complex downloads. + +Please add here all the necessary logic to either download, uncompress, pre/post-process +your dataset (or all of the above). If the desired way of running your baseline is to +first download the dataset and partition it and then run the experiments, please +uncomment the lines below and tell us in the README.md (see the "Running the Experiment" +block) that this file should be executed first. 
+""" diff --git a/baselines/feddebug/feddebug/differential_testing.py b/baselines/feddebug/feddebug/differential_testing.py new file mode 100644 index 000000000000..d68d07b25090 --- /dev/null +++ b/baselines/feddebug/feddebug/differential_testing.py @@ -0,0 +1,220 @@ +"""Fed_Debug Differential Testing.""" + +import itertools +import time +from logging import DEBUG + +import torch +import torch.nn.functional as F +from flwr.common.logger import log + +from feddebug.neuron_activation import get_neurons_activations +from feddebug.utils import create_transform + + +def _predict_func(model, input_tensor): + model.eval() + logits = model(input_tensor) + preds = torch.argmax(F.log_softmax(logits, dim=1), dim=1) + pred = preds.item() + return pred + + +class InferenceGuidedInputGenerator: + """Generate random inputs based on the feedback from the clients.""" + + def __init__( + self, + clients2models, + input_shape, + transform_func, + k_gen_inputs=10, + min_nclients_same_pred=3, + time_delta=60, + faster_input_generation=False, + ): + self.clients2models = clients2models + self.input_shape = input_shape + self.transform = transform_func + self.k_gen_inputs = k_gen_inputs + self.min_nclients_same_pred = min_nclients_same_pred + self.time_delta = time_delta + self.faster_input_generation = faster_input_generation + self.seed = 0 + + def _get_random_input(self): + torch.manual_seed(self.seed) + self.seed += 1 + img = torch.rand(self.input_shape) + if self.transform: + return self.transform(img).unsqueeze(0) + return img.unsqueeze(0) + + def _simple_random_inputs(self): + start_time = time.time() + random_inputs = [self._get_random_input() for _ in range(self.k_gen_inputs)] + elapsed_time = time.time() - start_time + return random_inputs, elapsed_time + + def _generate_feedback_random_inputs(self): + print("Generating feedback-based random inputs") + random_inputs = [] + same_prediction_set = set() + start_time = time.time() + timeout = 60 + + while len(random_inputs) < 
self.k_gen_inputs: + img = self._get_random_input() + if self.min_nclients_same_pred > 1: + self._append_or_not(img, random_inputs, same_prediction_set) + else: + random_inputs.append(img) + + if time.time() - start_time > timeout: + timeout += 60 + self.min_nclients_same_pred -= 1 + print( + f">> Timeout: Number of distinct inputs: {len(random_inputs)}, " + f"decreasing min_nclients_same_pred " + f"to {self.min_nclients_same_pred} " + f"and extending timeout to {timeout} seconds" + ) + + elapsed_time = time.time() - start_time + return random_inputs, elapsed_time + + def _append_or_not(self, input_tensor, random_inputs, same_prediction_set): + preds = [ + _predict_func(model, input_tensor) for model in self.clients2models.values() + ] + for ci1, pred1 in enumerate(preds): + seq = {ci1} + for ci2, pred2 in enumerate(preds): + if ci1 != ci2 and pred1 == pred2: + seq.add(ci2) + + seq_str = ",".join(map(str, seq)) + if ( + seq_str not in same_prediction_set + and len(seq) >= self.min_nclients_same_pred + ): + same_prediction_set.add(seq_str) + random_inputs.append(input_tensor) + + def get_inputs(self): + """Return generated random inputs.""" + if self.faster_input_generation or len(self.clients2models) <= 10: + return self._simple_random_inputs() + return self._generate_feedback_random_inputs() + + +def _torch_intersection(client2tensors): + intersect = torch.ones_like(next(iter(client2tensors.values())), dtype=torch.bool) + for temp_t in client2tensors.values(): + intersect = torch.logical_and(intersect, temp_t) + return intersect + + +def _generate_leave_one_out_combinations(clients_ids): + """Generate and update all subsets of clients with a specified subset size.""" + subset_size = len(clients_ids) - 1 + subsets = [set(sub) for sub in itertools.combinations(clients_ids, subset_size)] + return subsets + + +class FaultyClientDetector: + """Faulty Client Localization using Neuron Activation.""" + + def __init__(self, device): + self.leave_1_out_combs = None + 
self.device = device + + def _get_clients_ids_with_highest_common_neurons(self, clients2neurons2boolact): + def _count_common_neurons(comb): + """Return the number of common neurons. + + In PyTorch, boolean values are treated as integers (True as 1 and False as + 0), so summing a tensor of boolean values will give you the count of True + values. + """ + c2act = {cid: clients2neurons2boolact[cid] for cid in comb} + intersect_tensor = _torch_intersection(c2act) + return intersect_tensor.sum().item() + + count_of_common_neurons = [ + _count_common_neurons(comb) for comb in self.leave_1_out_combs + ] + + highest_number_of_common_neurons = max(count_of_common_neurons) + val_index = count_of_common_neurons.index(highest_number_of_common_neurons) + val_clients_ids = self.leave_1_out_combs[val_index] + return val_clients_ids + + def get_client_neurons_activations(self, client2model, input_tensor): + """Get neuron activations for each client model.""" + client2acts = {} + for cid, model in client2model.items(): + model = model.to(self.device) + neurons_act = get_neurons_activations(model, input_tensor.to(self.device)) + client2acts[cid] = neurons_act.cpu() + model = model.cpu() + input_tensor = input_tensor.cpu() + return client2acts + + def get_malicious_clients(self, client2acts, na_t, num_bugs): + """Identify potential malicious clients based on neuron activations.""" + potential_faulty_clients = None + all_clients_ids = set(client2acts.keys()) + self.leave_1_out_combs = _generate_leave_one_out_combinations(all_clients_ids) + for _ in range(num_bugs): + client2_na = { + cid: activations > na_t for cid, activations in client2acts.items() + } + normal_clients_ids = self._get_clients_ids_with_highest_common_neurons( + client2_na + ) + + potential_faulty_clients = all_clients_ids - normal_clients_ids + log(DEBUG, "Malicious clients %s", potential_faulty_clients) + self.leave_1_out_combs = _generate_leave_one_out_combinations( + all_clients_ids - potential_faulty_clients + ) 
+ + return potential_faulty_clients + + +def differential_testing_fl_clients( + client2model, + num_bugs, + num_inputs, + input_shape, + na_threshold, + faster_input_generation, + device, +): + """Differential Testing for FL Clients.""" + generate_inputs = InferenceGuidedInputGenerator( + clients2models=client2model, + input_shape=input_shape, + transform_func=create_transform(), + k_gen_inputs=num_inputs, + min_nclients_same_pred=3, + faster_input_generation=faster_input_generation, + ) + selected_inputs, _ = generate_inputs.get_inputs() + + predicted_faulty_clients = [] + localize = FaultyClientDetector(device) + + # Iterate over each random input tensor to detect malicious clients + for input_tensor in selected_inputs: + # Get neuron activations for each client model + client2acts = localize.get_client_neurons_activations( + client2model, input_tensor + ) + # Identify potential malicious clients based on activations and thresholds + potential_malicious_clients = localize.get_malicious_clients( + client2acts, na_threshold, num_bugs + ) + predicted_faulty_clients.append(potential_malicious_clients) + return predicted_faulty_clients diff --git a/baselines/feddebug/feddebug/main.py b/baselines/feddebug/feddebug/main.py new file mode 100644 index 000000000000..c3937b7f8a3d --- /dev/null +++ b/baselines/feddebug/feddebug/main.py @@ -0,0 +1,171 @@ +"""Create and connect the building blocks for your experiments; start the simulation. + +It includes processioning the dataset, instantiate strategy, specify how the global +model is going to be evaluated, etc. At the end, this script saves the results. 
+""" + +import time +from logging import DEBUG, INFO +from pathlib import Path + +import flwr as fl +import hydra +import torch +from flwr.common import ndarrays_to_parameters +from flwr.common.logger import log +from hydra.core.hydra_config import HydraConfig + +from feddebug import utils +from feddebug.client import CNNFlowerClient +from feddebug.dataset import ClientsAndServerDatasets +from feddebug.models import initialize_model, test +from feddebug.strategy import FedAvgWithFedDebug + +utils.seed_everything(786) + + +def _fit_metrics_aggregation_fn(metrics): + """Aggregate metrics recieved from client.""" + log(INFO, ">> ------------------- Clients Metrics ------------- ") + all_logs = {} + for nk_points, metric_d in metrics: + cid = int(metric_d["cid"]) + temp_s = ( + f' Client {metric_d["cid"]}, Loss Train {metric_d["train_loss"]}, ' + f'Accuracy Train {metric_d["train_accuracy"]}, data_points = {nk_points}' + ) + all_logs[cid] = temp_s + + # sorted by client id from lowest to highest + for k in sorted(all_logs.keys()): + log(INFO, all_logs[k]) + return {"loss": 0.0, "accuracy": 0.0} + + +def run_simulation(cfg): + """Run the simulation.""" + if cfg.total_malicious_clients: + cfg.malicious_clients_ids = list(range(cfg.total_malicious_clients)) + + cfg.malicious_clients_ids = [f"{c}" for c in cfg.malicious_clients_ids] + + save_path = Path(HydraConfig.get().runtime.output_dir) + + exp_key = utils.set_exp_key(cfg) + + log(INFO, " *********** Starting Experiment: %s ***************", exp_key) + + log(DEBUG, "Simulation Configuration: %s", cfg) + + num_bugs = len(cfg.malicious_clients_ids) + ds_prep = ClientsAndServerDatasets(cfg) + ds_dict = ds_prep.get_data() + server_testdata = ds_dict["server_testdata"] + + round2gm_accs = [] + round2feddebug_accs = [] + + def _create_model(): + return initialize_model(cfg.model, cfg.dataset) + + def _get_fit_config(server_round): + return { + "server_round": server_round, + "local_epochs": cfg.client.epochs, + "batch_size": 
cfg.client.batch_size, + "lr": cfg.client.lr, + } + + def _get_client(cid): + """Give the new client.""" + client2data = ds_dict["client2data"] + + args = { + "cid": cid, + "model": _create_model(), + "client_data_train": client2data[cid], + "device": torch.device(cfg.device), + } + client = CNNFlowerClient(args).to_client() + return client + + def _eval_gm(server_round, parameters, config): + gm_model = _create_model() + utils.set_parameters(gm_model, parameters) + d_res = test(gm_model, server_testdata, device=cfg.device) + round2gm_accs.append(d_res["accuracy"]) + log(DEBUG, "config: %s", config) + return d_res["loss"], { + "accuracy": d_res["accuracy"], + "loss": d_res["loss"], + "round": server_round, + } + + def _callback_fed_debug_evaluate_fn(server_round, predicted_malicious_clients): + true_malicious_clients = cfg.malicious_clients_ids + + log(INFO, "***FedDebug Output Round %s ***", server_round) + log(INFO, "True Malicious Clients (Ground Truth) = %s", true_malicious_clients) + log(INFO, "Total Random Inputs = %s", cfg.feddebug.r_inputs) + localization_accuracy = utils.calculate_localization_accuracy( + true_malicious_clients, predicted_malicious_clients + ) + + mal_probs = { + c: v / cfg.feddebug.r_inputs for c, v in predicted_malicious_clients.items() + } + log(INFO, "Predicted Malicious Clients = %s", mal_probs) + log(INFO, "FedDebug Localization Accuracy = %s", localization_accuracy) + round2feddebug_accs.append(localization_accuracy) + + initial_net = _create_model() + strategy = FedAvgWithFedDebug( + num_bugs=num_bugs, + num_inputs=cfg.feddebug.r_inputs, + input_shape=server_testdata.dataset[0]["image"].clone().detach().shape, + na_t=cfg.feddebug.na_t, + device=cfg.device, + fast=cfg.feddebug.fast, + callback_create_model_fn=_create_model, + callback_fed_debug_evaluate_fn=_callback_fed_debug_evaluate_fn, + accept_failures=False, + fraction_fit=0, + fraction_evaluate=0.0, + min_fit_clients=cfg.clients_per_round, + min_evaluate_clients=0, + 
min_available_clients=cfg.num_clients, + initial_parameters=ndarrays_to_parameters( + ndarrays=utils.get_parameters(initial_net) + ), + evaluate_fn=_eval_gm, + on_fit_config_fn=_get_fit_config, + fit_metrics_aggregation_fn=_fit_metrics_aggregation_fn, + ) + + server_config = fl.server.ServerConfig(num_rounds=cfg.num_rounds) + + client_app = fl.client.ClientApp(client_fn=_get_client) + server_app = fl.server.ServerApp(config=server_config, strategy=strategy) + + fl.simulation.run_simulation( + server_app=server_app, + client_app=client_app, + num_supernodes=cfg.num_clients, + backend_config=utils.config_sim_resources(cfg), + ) + + utils.plot_metrics(round2gm_accs, round2feddebug_accs, cfg, save_path) + + log(INFO, "Training Complete for Experiment: %s", exp_key) + + +@hydra.main(config_path="conf", config_name="base", version_base=None) +def main(cfg) -> None: + """Run the baseline.""" + start_time = time.time() + run_simulation(cfg) + log(INFO, "Total Time Taken: %s seconds", time.time() - start_time) + + +if __name__ == "__main__": + main() diff --git a/baselines/feddebug/feddebug/models.py b/baselines/feddebug/feddebug/models.py new file mode 100644 index 000000000000..1d5509f2620f --- /dev/null +++ b/baselines/feddebug/feddebug/models.py @@ -0,0 +1,146 @@ +"""Define our models, and training and eval functions. + +If your model is 100% off-the-shelf (e.g. directly from torchvision without requiring +modifications) you might be better off instantiating your model directly from the Hydra +config. 
In this way, swapping your model for another one can be done without changing +the python code at all +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision + + +class LeNet(nn.Module): + """LeNet model.""" + + def __init__(self, config): + super().__init__() + self.conv1 = nn.Conv2d(config["channels"], 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, config["num_classes"]) + + def forward(self, x): + """Forward pass.""" + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x + + +def _get_inputs_labels_from_batch(batch): + if "image" in batch: + return batch["image"], batch["label"] + x, y = batch + return x, y + + +def initialize_model(name, cfg): + """Initialize the model with the given name.""" + model_functions = { + "resnet18": lambda: torchvision.models.resnet18(weights="IMAGENET1K_V1"), + "resnet34": lambda: torchvision.models.resnet34(weights="IMAGENET1K_V1"), + "resnet50": lambda: torchvision.models.resnet50(weights="IMAGENET1K_V1"), + "resnet101": lambda: torchvision.models.resnet101(weights="IMAGENET1K_V1"), + "resnet152": lambda: torchvision.models.resnet152(weights="IMAGENET1K_V1"), + "densenet121": lambda: torchvision.models.densenet121(weights="IMAGENET1K_V1"), + "vgg16": lambda: torchvision.models.vgg16(weights="IMAGENET1K_V1"), + "lenet": lambda: LeNet( + {"channels": cfg.channels, "num_classes": cfg.num_classes} + ), + } + model = model_functions[name]() + # Modify model for grayscale input if necessary + if cfg.channels == 1: + if name.startswith("resnet"): + model.conv1 = torch.nn.Conv2d( + 1, 64, kernel_size=7, stride=2, padding=3, bias=False + ) + elif name == "densenet121": + model.features[0] = torch.nn.Conv2d( + 1, 64, kernel_size=(7, 7), 
stride=(2, 2), padding=(3, 3), bias=False + ) + elif name == "vgg16": + model.features[0] = torch.nn.Conv2d( + 1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1) + ) + + # Modify final layer to match the number of classes + if name.startswith("resnet"): + num_ftrs = model.fc.in_features + model.fc = torch.nn.Linear(num_ftrs, cfg.num_classes) + elif name == "densenet121": + num_ftrs = model.classifier.in_features + model.classifier = torch.nn.Linear(num_ftrs, cfg.num_classes) + elif name == "vgg16": + num_ftrs = model.classifier[-1].in_features + model.classifier[-1] = torch.nn.Linear(num_ftrs, cfg.num_classes) + + return model.cpu() + + +def _train(tconfig): + """Train the network on the training set.""" + trainloader = tconfig["train_data"] + net = tconfig["model"] + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.Adam(net.parameters(), lr=tconfig["lr"]) + net = net.to(tconfig["device"]).train() + epoch_loss = 0 + epoch_acc = 0 + for _epoch in range(tconfig["epochs"]): + correct, total, epoch_loss = 0, 0, 0.0 + for batch in trainloader: + images, labels = _get_inputs_labels_from_batch(batch) + images, labels = images.to(tconfig["device"]), labels.to(tconfig["device"]) + optimizer.zero_grad() + outputs = net(images) + loss = criterion(net(images), labels) + loss.backward() + optimizer.step() + # Metrics + epoch_loss += loss.item() + total += labels.size(0) + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + images = images.cpu() + labels = labels.cpu() + # break + epoch_loss /= total + epoch_acc = correct / total + net = net.cpu() + return {"train_loss": epoch_loss, "train_accuracy": epoch_acc} + + +def test(net, testloader, device): + """Evaluate the network on the entire test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, total, loss = 0, 0, 0.0 + net = net.to(device).eval() + with torch.no_grad(): + for batch in testloader: + images, labels = _get_inputs_labels_from_batch(batch) + images, labels = 
images.to(device), labels.to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + images = images.cpu() + labels = labels.cpu() + loss /= len(testloader.dataset) + accuracy = correct / total + net = net.cpu() + return {"loss": loss, "accuracy": accuracy} + + +def train_neural_network(tconfig): + """Train the neural network.""" + train_dict = _train(tconfig) + return train_dict diff --git a/baselines/feddebug/feddebug/neuron_activation.py b/baselines/feddebug/feddebug/neuron_activation.py new file mode 100644 index 000000000000..d7dfae49ed6f --- /dev/null +++ b/baselines/feddebug/feddebug/neuron_activation.py @@ -0,0 +1,59 @@ +"""The file contains the code to get the activations of all neurons.""" + +import torch +import torch.nn.functional as F + + +class NeuronActivation: + """Class to get the activations of all neurons in the model.""" + + def __init__(self): + self.hooks_storage = [] + + def _get_all_layers_in_neural_network(self, net): + layers = [] + for layer in net.children(): + if len(list(layer.children())) == 0 and isinstance( + layer, (torch.nn.Conv2d, torch.nn.Linear) + ): + layers.append(layer) + if len(list(layer.children())) > 0: + temp_layers = self._get_all_layers_in_neural_network(layer) + layers = layers + temp_layers + return layers + + def _get_input_and_output_of_layer(self, layer, input_t, output_t): + assert ( + len(input_t) == 1 + ), f"Hook, {layer.__class__.__name__} Expected 1 input, got {len(input_t)}" + self.hooks_storage.append(output_t.detach()) + + def _insert_hooks(self, layers): + all_hooks = [] + for layer in layers: + hook = layer.register_forward_hook(self._get_input_and_output_of_layer) + all_hooks.append(hook) + return all_hooks + + def get_neurons_activations(self, model, img): + """Return the activations of model for the given input.""" + layer2output = [] + layers = 
def get_neurons_activations(model, img):
    """Return activations of every Conv2d/Linear neuron in ``model`` for ``img``.

    Convenience wrapper: puts the model in eval mode and delegates to a
    fresh ``NeuronActivation`` helper so hook state never leaks between calls.
    """
    extractor = NeuronActivation()
    return extractor.get_neurons_activations(model.eval(), img)
+""" + +from collections import Counter + +import flwr as fl + +from feddebug import utils +from feddebug.differential_testing import differential_testing_fl_clients + + +class FedAvgWithFedDebug(fl.server.strategy.FedAvg): + """FedAvg with Differential Testing.""" + + def __init__( + self, + num_bugs, + num_inputs, + input_shape, + na_t, + device, + fast, + callback_create_model_fn, + callback_fed_debug_evaluate_fn, + *args, + **kwargs, + ): + """Initialize.""" + super().__init__(*args, **kwargs) + self.input_shape = input_shape + self.num_bugs = num_bugs + self.num_inputs = num_inputs + self.na_t = na_t + self.device = device + self.fast = fast + self.create_model_fn = callback_create_model_fn + self.callback_fed_debug_evaluate_fn = callback_fed_debug_evaluate_fn + + def aggregate_fit(self, server_round, results, failures): + """Aggregate clients updates.""" + potential_mal_clients = self._run_differential_testing_helper(results) + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + aggregated_metrics["potential_malicious_clients"] = potential_mal_clients + self.callback_fed_debug_evaluate_fn(server_round, potential_mal_clients) + return aggregated_parameters, aggregated_metrics + + def _get_model_from_parameters(self, parameters): + """Convert parameters to state_dict.""" + ndarr = fl.common.parameters_to_ndarrays(parameters) + temp_net = self.create_model_fn() + utils.set_parameters(temp_net, ndarr) + return temp_net + + def _run_differential_testing_helper(self, results): + client2model = { + fit_res.metrics["cid"]: self._get_model_from_parameters(fit_res.parameters) + for _, fit_res in results + } + predicted_faulty_clients_on_each_input = differential_testing_fl_clients( + client2model, + self.num_bugs, + self.num_inputs, + self.input_shape, + self.na_t, + self.fast, + self.device, + ) + mal_clients_dict = Counter( + [ + f"{e}" + for temp_set in predicted_faulty_clients_on_each_input + for e in temp_set 
"""Utility helpers for the FedDebug baseline.

They are not directly relevant to the other (more FL specific) python modules:
seeding, localization-accuracy computation, simulation resource config,
parameter (de)serialization, and plotting.
"""

import random
from logging import INFO

import numpy as np
import torch


def seed_everything(seed=786):
    """Seed Python, NumPy and PyTorch RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


def calculate_localization_accuracy(true_faulty_clients, predicted_faulty_clients):
    """Return the fault-localization accuracy as a percentage.

    Parameters
    ----------
    true_faulty_clients : collection of str
        Ground-truth malicious client ids.
    predicted_faulty_clients : dict
        Maps client id -> number of random inputs that flagged that client.

    Returns
    -------
    float
        ``100 * (votes for truly-malicious clients) / (all votes)``; 0.0 when
        no client was flagged at all.
    """
    true_preds = 0
    total = 0
    for client, votes in predicted_faulty_clients.items():
        if client in true_faulty_clients:
            true_preds += votes
        total += votes
    # BUG FIX: the original divided unconditionally and raised
    # ZeroDivisionError when no client was flagged on any input.
    if total == 0:
        return 0.0
    return (true_preds / total) * 100


def create_transform():
    """Create the normalization transform for the dataset."""
    # Lazy import so this module does not hard-require torchvision.
    from torchvision import transforms

    return transforms.Compose([transforms.Normalize((0.5,), (0.5,))])


def set_exp_key(cfg):
    """Build a unique experiment key from the run configuration."""
    # NOTE(review): the stray ')' after clients_per_round is kept byte-for-byte
    # so keys match previously generated experiment names/paths.
    key = (
        f"{cfg.model}-{cfg.dataset.name}-"
        f"faulty_clients[{cfg.malicious_clients_ids}]-"
        f"noise_rate{cfg.noise_rate}-"
        f"TClients{cfg.num_clients}-"
        f"-clientsPerR{cfg.clients_per_round})"
        f"-{cfg.distribution}"
        f"-batch{cfg.client.batch_size}-epochs{cfg.client.epochs}-"
        f"lr{cfg.client.lr}"
    )
    return key


def config_sim_resources(cfg):
    """Build the ``backend_config`` dict for the Flower simulation backend."""
    client_resources = {"num_cpus": cfg.client_resources.num_cpus}
    # GPUs are only reserved per-client when running on CUDA.
    if cfg.device == "cuda":
        client_resources["num_gpus"] = cfg.client_resources.num_gpus

    init_args = {"num_cpus": cfg.total_cpus, "num_gpus": cfg.total_gpus}
    return {
        "client_resources": client_resources,
        "init_args": init_args,
    }


def get_parameters(model):
    """Return model parameters as a list of NumPy ndarrays (state_dict order)."""
    model = model.cpu()
    return [val.cpu().detach().clone().numpy() for _, val in model.state_dict().items()]


def set_parameters(net, parameters):
    """Load a list of NumPy ndarrays (state_dict order) into ``net`` in place."""
    net = net.cpu()
    params_dict = zip(net.state_dict().keys(), parameters)
    new_state_dict = {k: torch.from_numpy(v) for k, v in params_dict}
    net.load_state_dict(new_state_dict, strict=True)


def plot_metrics(gm_accs, feddebug_accs, cfg, save_path):
    """Plot global-model accuracy vs FedDebug localization accuracy; save a PNG."""
    # Lazy imports: plotting dependencies are only needed when this is called.
    import matplotlib.pyplot as plt
    from flwr.common.logger import log

    fig, axis = plt.subplots(figsize=(3.5, 2.5))

    # Convert accuracy to percentages; drop round 0 (initial evaluation).
    gm_accs = [x * 100 for x in gm_accs][1:]

    # Distinct line styles so the two curves stay readable in grayscale.
    axis.plot(gm_accs, label="Global Model", linestyle="-", linewidth=2)
    axis.plot(feddebug_accs, label="FedDebug", linestyle="--", linewidth=2)

    axis.set_xlabel("Training Round", fontsize=12)
    axis.set_ylabel("Accuracy (%)", fontsize=12)

    title = f"{cfg.distribution}-{cfg.model}-{cfg.dataset.name}"
    axis.set_title(title, fontsize=12)
    axis.legend(fontsize=12, loc="lower right", frameon=False)

    fig.tight_layout()

    # High DPI for publication-quality output.
    graph_path = save_path / f"{title}.png"
    plt.savefig(graph_path, dpi=300, bbox_inches="tight")
    plt.close()
    log(INFO, "Saved plot at %s", graph_path)
a/baselines/feddebug/pyproject.toml b/baselines/feddebug/pyproject.toml new file mode 100644 index 000000000000..bbc8f55777ea --- /dev/null +++ b/baselines/feddebug/pyproject.toml @@ -0,0 +1,140 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "feddebug" # <----- Ensure it matches the name of your baseline directory containing all the source code +version = "1.0.0" +description = "Flower Baselines" +license = "Apache-2.0" +authors = ["The Flower Authors "] +readme = "README.md" +homepage = "https://flower.ai" +repository = "https://github.com/adap/flower" +documentation = "https://flower.ai" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", +] + +[tool.poetry.dependencies] +python = ">=3.8.15, <3.12.0" # don't change this +flwr = { extras = ["simulation"], version = "1.9.0" } +hydra-core = "1.3.2" # don't change this +torch = "2.2.2" +torchvision = "0.17.2" +flwr-datasets = "0.3.0" + +[tool.poetry.dev-dependencies] +isort = "==5.13.2" +black = "==24.2.0" +docformatter = "==1.7.5" +mypy = "==1.4.1" +pylint 
= "==2.8.2" +flake8 = "==3.9.2" +pytest = "==6.2.4" +pytest-watch = "==4.2.0" +ruff = "==0.0.272" +types-requests = "==2.27.7" + +[tool.isort] +line_length = 88 +indent = " " +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias,too-many-locals,too-many-arguments,too-many-instance-attributes" +good-names = "i,j,k,_,x,y,X,Y" +signature-mutators = "hydra.main.main" + +[tool.pylint.typecheck] +generated-members = "numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/benchmarks/flowertune-llm/evaluation/finance/README.md b/benchmarks/flowertune-llm/evaluation/finance/README.md index b5595433a238..15d2410b8ca4 100644 --- a/benchmarks/flowertune-llm/evaluation/finance/README.md 
+++ b/benchmarks/flowertune-llm/evaluation/finance/README.md @@ -27,6 +27,7 @@ huggingface-cli login ```bash python eval.py \ +--base-model-name-path=your-base-model-name \ # e.g., mistralai/Mistral-7B-v0.3 --peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 --run-name=fl \ # specified name for this run --batch-size=32 \ diff --git a/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py index 2b1a174e571f..f2dad1e056b8 100644 --- a/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py +++ b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py @@ -122,7 +122,10 @@ def inference(dataset, model, tokenizer, batch_size): **tokens, max_length=512, eos_token_id=tokenizer.eos_token_id ) res_sentences = [tokenizer.decode(i, skip_special_tokens=True) for i in res] - out_text = [o.split("Answer: ")[1] for o in res_sentences] + out_text = [ + o.split("Answer: ")[1] if len(o.split("Answer: ")) > 1 else "None" + for o in res_sentences + ] out_text_list += out_text torch.cuda.empty_cache() diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/README.md b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md index c3fd71da6ea2..5acd75285dd3 100644 --- a/benchmarks/flowertune-llm/evaluation/general-nlp/README.md +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md @@ -27,6 +27,7 @@ huggingface-cli login ```bash python eval.py \ +--base-model-name-path=your-base-model-name \ # e.g., mistralai/Mistral-7B-v0.3 --peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 --run-name=fl \ # specified name for this run --batch-size=16 \ diff --git a/benchmarks/flowertune-llm/evaluation/medical/README.md b/benchmarks/flowertune-llm/evaluation/medical/README.md index 628489ce8de6..6a519e8a7c54 100644 --- a/benchmarks/flowertune-llm/evaluation/medical/README.md +++ b/benchmarks/flowertune-llm/evaluation/medical/README.md @@ -27,6 +27,7 @@ huggingface-cli login ```bash 
python eval.py \ +--base-model-name-path=your-base-model-name \ # e.g., mistralai/Mistral-7B-v0.3 --peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 --run-name=fl \ # specified name for this run --batch-size=16 \ diff --git a/datasets/README.md b/datasets/README.md index 25db77233558..0d35d2e31b6a 100644 --- a/datasets/README.md +++ b/datasets/README.md @@ -6,7 +6,7 @@ ![Downloads](https://pepy.tech/badge/flwr-datasets) [![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.ai/join-slack) -Flower Datasets (`flwr-datasets`) is a library to quickly and easily create datasets for federated learning, federated evaluation, and federated analytics. It was created by the `Flower Labs` team that also created Flower: A Friendly Federated Learning Framework. +Flower Datasets (`flwr-datasets`) is a library to quickly and easily create datasets for federated learning, federated evaluation, and federated analytics. It was created by the `Flower Labs` team that also created Flower: A Friendly Federated AI Framework. > [!TIP] diff --git a/datasets/doc/source/conf.py b/datasets/doc/source/conf.py index dcba63dd221c..92d59d7df370 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/doc/source/conf.py @@ -38,7 +38,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "0.3.0" +release = "0.4.0" # -- General configuration --------------------------------------------------- diff --git a/datasets/doc/source/contributor-how-to-contribute-dataset.rst b/datasets/doc/source/contributor-how-to-contribute-dataset.rst new file mode 100644 index 000000000000..07a6ba6378f7 --- /dev/null +++ b/datasets/doc/source/contributor-how-to-contribute-dataset.rst @@ -0,0 +1,56 @@ +How to contribute a dataset +=========================== + +To make a dataset available in Flower Dataset (`flwr-datasets`), you need to add the dataset to `HuggingFace Hub `_ . 
+ +This guide will explain the best practices we found when adding datasets ourselves and point to the HFs guides. To see the datasets added by Flower, visit https://huggingface.co/flwrlabs. + +Dataset contribution process +---------------------------- +The contribution contains three steps: first, on your development machine transform your dataset into a ``datasets.Dataset`` object, the preferred format for datasets in HF Hub; second, upload the dataset to HuggingFace Hub and detail it its readme how can be used with Flower Dataset; third, share your dataset with us and we will add it to the `recommended FL dataset list `_ + +Creating a dataset locally +^^^^^^^^^^^^^^^^^^^^^^^^^^ +You can create a local dataset directly using the `datasets` library or load it in any custom way and transform it to the `datasets.Dataset` from other Python objects. +To complete this step, we recommend reading our :doc:`how-to-use-with-local-data` guide or/and the `Create a dataset `_ guide from HF. + +.. tip:: + We recommend that you do not upload custom scripts to HuggingFace Hub; instead, create the dataset locally and upload the data, which will speed up the processing time each time the data set is downloaded. + +Contribution to the HuggingFace Hub +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Each dataset in the HF Hub is a Git repository with a specific structure and readme file, and HuggingFace provides an API to push the dataset and, alternatively, a user interface directly in the website to populate the information in the readme file. + +Contributions to the HuggingFace Hub come down to: + +1. creating an HF repository for the dataset. +2. uploading the dataset. +3. filling in the information in the readme file. + +To complete this step, follow this HF's guide `Share dataset to the Hub `_. + +Note that the push of the dataset is straightforward, and here's what it could look like: + +.. 
code-block:: python + + from datasets import Dataset + + # Example dataset + data = { + 'column1': [1, 2, 3], + 'column2': ['a', 'b', 'c'] + } + + # Create a Dataset object + dataset = Dataset.from_dict(data) + + # Push the dataset to the HuggingFace Hub + dataset.push_to_hub("you-hf-username/your-ds-name") + +To make the dataset easily accessible in FL we recommend adding the "Use in FL" section. Here's an example of how it is done in `one of our repos `_ for the cinic10 dataset. + +Increasing visibility of the dataset +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +If you want the dataset listed in our `recommended FL dataset list `_ , please send a PR or ping us in `Slack `_ #contributions channel. + +That's it! You have successfully contributed a dataset to the HuggingFace Hub and made it available for FL community. Thank you for your contribution! \ No newline at end of file diff --git a/datasets/doc/source/how-to-install-flwr-datasets.rst b/datasets/doc/source/how-to-install-flwr-datasets.rst index 2068fc11da85..3f79daceb753 100644 --- a/datasets/doc/source/how-to-install-flwr-datasets.rst +++ b/datasets/doc/source/how-to-install-flwr-datasets.rst @@ -42,5 +42,5 @@ If everything worked, it should print the version of Flower Datasets to the comm .. code-block:: none - 0.3.0 + 0.4.0 diff --git a/datasets/doc/source/index.rst b/datasets/doc/source/index.rst index d6b51fc84ad6..55e4ea963453 100644 --- a/datasets/doc/source/index.rst +++ b/datasets/doc/source/index.rst @@ -1,7 +1,7 @@ Flower Datasets =============== -Flower Datasets (``flwr-datasets``) is a library that enables the quick and easy creation of datasets for federated learning/analytics/evaluation. It enables heterogeneity (non-iidness) simulation and division of datasets with the preexisting notion of IDs. The library was created by the ``Flower Labs`` team that also created `Flower `_ : A Friendly Federated Learning Framework. 
+Flower Datasets (``flwr-datasets``) is a library that enables the quick and easy creation of datasets for federated learning/analytics/evaluation. It enables heterogeneity (non-iidness) simulation and division of datasets with the preexisting notion of IDs. The library was created by the ``Flower Labs`` team that also created `Flower `_ : A Friendly Federated AI Framework. Try out an interactive demo to generate code and visualize heterogeneous divisions at the :ref:`bottom of this page`. @@ -63,20 +63,28 @@ Information-oriented API reference and other reference material. :maxdepth: 1 :caption: Reference docs + recommended-fl-datasets ref-telemetry +.. toctree:: + :maxdepth: 1 + :caption: Contributor tutorials + + contributor-how-to-contribute-dataset + + Main features ------------- Flower Datasets library supports: - **Downloading datasets** - choose the dataset from Hugging Face's ``dataset`` (`link `_)(*) -- **Partitioning datasets** - choose one of the implemented partitioning scheme or create your own. +- **Partitioning datasets** - choose one of the implemented partitioning schemes or create your own. - **Creating centralized datasets** - leave parts of the dataset unpartitioned (e.g. for centralized evaluation) - **Visualization of the partitioned datasets** - visualize the label distribution of the partitioned dataset (and compare the results on different parameters of the same partitioning schemes, different datasets, different partitioning schemes, or any mix of them) .. note:: - (*) Once the dataset is available on HuggingFace Hub it can be **immediately** used in ``Flower Datasets`` (no approval from the Flower team needed, no custom code needed). + (*) Once the dataset is available on HuggingFace Hub, it can be **immediately** used in ``Flower Datasets`` without requiring approval from the Flower team or the need for custom code. .. 
image:: ./_static/readme/comparison_of_partitioning_schemes.png @@ -93,7 +101,7 @@ Thanks to using Hugging Face's ``datasets`` used under the hood, Flower Datasets - Jax - Arrow -Here are a few of the ``Partitioner`` s that are available: (for a full list see `link `_ ) +Here are a few of the ``Partitioners`` that are available: (for a full list see `link `_ ) * Partitioner (the abstract base class) ``Partitioner`` * IID partitioning ``IidPartitioner(num_partitions)`` @@ -119,7 +127,7 @@ What makes Flower Datasets stand out from other libraries? * Access to the largest online repository of datasets: - * The library functionality is independent of the dataset, so you can use any dataset available on `🤗Hugging Face Datasets `_, which means that others can immediately benefit from the dataset you added. + * The library functionality is independent of the dataset, so you can use any dataset available on `🤗Hugging Face Datasets `_. This means that others can immediately benefit from the dataset you added. * Out-of-the-box reproducibility across different projects. diff --git a/datasets/doc/source/recommended-fl-datasets.rst b/datasets/doc/source/recommended-fl-datasets.rst new file mode 100644 index 000000000000..92479bd0542a --- /dev/null +++ b/datasets/doc/source/recommended-fl-datasets.rst @@ -0,0 +1,167 @@ +Recommended FL Datasets +======================= + +This page lists the recommended datasets for federated learning research, which can be +used with Flower Datasets ``flwr-datasets``. To learn about the library, see the +`quickstart tutorial `_ . To +see the full FL example with Flower and Flower Datasets open the `quickstart-pytorch +`_. + +.. note:: + + All datasets from `HuggingFace Hub `_ can be used with our library. This page presents just a set of datasets we collected that you might find useful. + +For more information about any dataset, visit its page by clicking the dataset name. 
For more information how to use the + +Image Datasets +-------------- + +.. list-table:: Image Datasets + :widths: 40 40 20 + :header-rows: 1 + + * - Name + - Size + - Image Shape + * - `ylecun/mnist `_ + - train 60k; + test 10k + - 28x28 + * - `uoft-cs/cifar10 `_ + - train 50k; + test 10k + - 32x32x3 + * - `uoft-cs/cifar100 `_ + - train 50k; + test 10k + - 32x32x3 + * - `zalando-datasets/fashion_mnist `_ + - train 60k; + test 10k + - 28x28 + * - `flwrlabs/femnist `_ + - train 814k + - 28x28 + * - `zh-plus/tiny-imagenet `_ + - train 100k; + valid 10k + - 64x64x3 + * - `flwrlabs/usps `_ + - train 7.3k; + test 2k + - 16x16 + * - `flwrlabs/pacs `_ + - train 10k + - 227x227 + * - `flwrlabs/cinic10 `_ + - train 90k; + valid 90k; + test 90k + - 32x32x3 + * - `flwrlabs/caltech101 `_ + - train 8.7k + - varies + * - `flwrlabs/office-home `_ + - train 15.6k + - varies + * - `flwrlabs/fed-isic2019 `_ + - train 18.6k; + test 4.7k + - varies + * - `ufldl-stanford/svhn `_ + - train 73.3k; + test 26k; + extra 531k + - 32x32x3 + * - `sasha/dog-food `_ + - train 2.1k; + test 0.9k + - varies + * - `Mike0307/MNIST-M `_ + - train 59k; + test 9k + - 32x32 + +Audio Datasets +-------------- + +.. list-table:: Audio Datasets + :widths: 35 30 15 + :header-rows: 1 + + * - Name + - Size + - Subset + * - `google/speech_commands `_ + - train 64.7k + - v0.01 + * - `google/speech_commands `_ + - train 105.8k + - v0.02 + * - `flwrlabs/ambient-acoustic-context `_ + - train 70.3k + - + * - `fixie-ai/common_voice_17_0 `_ + - varies + - 14 versions + * - `fixie-ai/librispeech_asr `_ + - varies + - clean/other + +Tabular Datasets +---------------- + +.. list-table:: Tabular Datasets + :widths: 35 30 + :header-rows: 1 + + * - Name + - Size + * - `scikit-learn/adult-census-income `_ + - train 32.6k + * - `jlh/uci-mushrooms `_ + - train 8.1k + * - `scikit-learn/iris `_ + - train 150 + +Text Datasets +------------- + +.. 
list-table:: Text Datasets + :widths: 40 30 30 + :header-rows: 1 + + * - Name + - Size + - Category + * - `sentiment140 `_ + - train 1.6M; + test 0.5k + - Sentiment + * - `google-research-datasets/mbpp `_ + - full 974; sanitized 427 + - General + * - `openai/openai_humaneval `_ + - test 164 + - General + * - `lukaemon/mmlu `_ + - varies + - General + * - `takala/financial_phrasebank `_ + - train 4.8k + - Financial + * - `pauri32/fiqa-2018 `_ + - train 0.9k; validation 0.1k; test 0.2k + - Financial + * - `zeroshot/twitter-financial-news-sentiment `_ + - train 9.5k; validation 2.4k + - Financial + * - `bigbio/pubmed_qa `_ + - train 2M; validation 11k + - Medical + * - `openlifescienceai/medmcqa `_ + - train 183k; validation 4.3k; test 6.2k + - Medical + * - `bigbio/med_qa `_ + - train 10.1k; test 1.3k; validation 1.3k + - Medical diff --git a/datasets/e2e/pytorch/pyproject.toml b/datasets/e2e/pytorch/pyproject.toml index 009ad2d74235..d42409ca1195 100644 --- a/datasets/e2e/pytorch/pyproject.toml +++ b/datasets/e2e/pytorch/pyproject.toml @@ -9,8 +9,8 @@ description = "Flower Datasets with PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" flwr-datasets = { path = "./../../", extras = ["vision"] } -torch = "^1.12.0" -torchvision = "^0.14.1" +torch = ">=1.12.0,<3.0.0" +torchvision = ">=0.19.0,<1.0.0" parameterized = "==0.9.0" diff --git a/datasets/e2e/scikit-learn/pyproject.toml b/datasets/e2e/scikit-learn/pyproject.toml index 48356e4a945f..ca7fb45d82be 100644 --- a/datasets/e2e/scikit-learn/pyproject.toml +++ b/datasets/e2e/scikit-learn/pyproject.toml @@ -9,7 +9,7 @@ description = "Flower Datasets with scikit-learn" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" flwr-datasets = { path = "./../../", extras = ["vision"] } scikit-learn = "^1.2.0" parameterized = "==0.9.0" diff --git a/datasets/e2e/tensorflow/pyproject.toml b/datasets/e2e/tensorflow/pyproject.toml index 
dbb6720219b2..fbfc8eb89451 100644 --- a/datasets/e2e/tensorflow/pyproject.toml +++ b/datasets/e2e/tensorflow/pyproject.toml @@ -9,7 +9,7 @@ description = "Flower Datasets with TensorFlow" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr-datasets = { path = "./../../", extras = ["vision"] } tensorflow-cpu = "^2.9.1, !=2.11.1" tensorflow-io-gcs-filesystem = "<0.35.0" diff --git a/datasets/flwr_datasets/__init__.py b/datasets/flwr_datasets/__init__.py index bd68fa43c606..dfae804046bc 100644 --- a/datasets/flwr_datasets/__init__.py +++ b/datasets/flwr_datasets/__init__.py @@ -15,7 +15,7 @@ """Flower Datasets main package.""" -from flwr_datasets import partitioner, preprocessor +from flwr_datasets import metrics, partitioner, preprocessor from flwr_datasets import utils as utils from flwr_datasets import visualization from flwr_datasets.common.version import package_version as _package_version diff --git a/datasets/flwr_datasets/common/typing.py b/datasets/flwr_datasets/common/typing.py index d6d37b468494..6b76e7b22eea 100644 --- a/datasets/flwr_datasets/common/typing.py +++ b/datasets/flwr_datasets/common/typing.py @@ -22,5 +22,5 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] -NDArrayFloat = npt.NDArray[np.float_] +NDArrayFloat = npt.NDArray[np.float64] NDArrays = list[NDArray] diff --git a/datasets/flwr_datasets/mock_utils_test.py b/datasets/flwr_datasets/mock_utils_test.py index 0976166648eb..acfa4b16e4ee 100644 --- a/datasets/flwr_datasets/mock_utils_test.py +++ b/datasets/flwr_datasets/mock_utils_test.py @@ -409,7 +409,11 @@ def _load_mocked_dataset_by_partial_download( The dataset with the requested samples. 
""" dataset = datasets.load_dataset( - dataset_name, name=subset_name, split=split_name, streaming=True + dataset_name, + name=subset_name, + split=split_name, + streaming=True, + trust_remote_code=True, ) dataset_list = [] # It's a list of dict such that each dict represent a single sample of the dataset diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py index ed38e8ee2a41..693e0d6a5aa6 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py @@ -29,7 +29,7 @@ def _dummy_setup( num_partitions: int, - alpha: Union[float, NDArray[np.float_]], + alpha: Union[float, NDArray[np.float64]], num_rows: int, partition_by: str, self_balancing: bool = True, diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner.py b/datasets/flwr_datasets/partitioner/distribution_partitioner.py index 86be62b36070..e4182f587cad 100644 --- a/datasets/flwr_datasets/partitioner/distribution_partitioner.py +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner.py @@ -36,21 +36,22 @@ class DistributionPartitioner(Partitioner): # pylint: disable=R0902 in a deterministic pathological manner. The 1st dimension is the number of unique labels and the 2nd-dimension is the number of buckets into which the samples associated with each label will be divided. That is, given a distribution array of - shape, - `num_unique_labels_per_partition` x `num_partitions` - ( `num_unique_labels`, ---------------------------------------------------- ), - `num_unique_labels` - the label_id at the i'th row is assigned to the partition_id based on the following - approach. - - First, for an i'th row, generate a list of `id`s according to the formula: - id = alpha + beta - where, - alpha = (i - num_unique_labels_per_partition + 1) \ - + (j % num_unique_labels_per_partition), - alpha = alpha + (alpha >= 0 ? 
0 : num_unique_labels), - beta = num_unique_labels * (j // num_unique_labels_per_partition) - and j in {0, 1, 2, ..., `num_columns`}. Then, sort the list of `id`s in ascending + shape,:: + + `num_unique_labels_per_partition` x `num_partitions` + ( `num_unique_labels`, ---------------------------------------------------- ), + `num_unique_labels` + the label_id at the i'th row is assigned to the partition_id based on the + following approach. + + First, for an i'th row, generate a list of `id`s according to the formula: + id = alpha + beta + where, + alpha = (i - num_unique_labels_per_partition + 1) + + + (j % num_unique_labels_per_partition), + alpha = alpha + (alpha >= 0 ? 0 : num_unique_labels), + beta = num_unique_labels * (j // num_unique_labels_per_partition) + and j in {0, 1, 2, ..., `num_columns`}. Then, sort the list of `id` s in ascending order. The j'th index in this sorted list corresponds to the partition_id that the i'th unique label (and the underlying distribution array value) will be assigned to. 
So, for a dataset with 10 unique labels and a configuration with 20 partitions and diff --git a/datasets/flwr_datasets/visualization/bar_plot.py b/datasets/flwr_datasets/visualization/bar_plot.py index 2b09fb189c7a..0f6936976fc0 100644 --- a/datasets/flwr_datasets/visualization/bar_plot.py +++ b/datasets/flwr_datasets/visualization/bar_plot.py @@ -22,6 +22,7 @@ from matplotlib import colors as mcolors from matplotlib import pyplot as plt from matplotlib.axes import Axes +from matplotlib.figure import Figure # pylint: disable=too-many-arguments,too-many-locals,too-many-branches @@ -82,10 +83,11 @@ def _plot_bar( if "stacked" not in plot_kwargs: plot_kwargs["stacked"] = True - axis = dataframe.plot( + axis_df: Axes = dataframe.plot( ax=axis, **plot_kwargs, ) + assert axis_df is not None, "axis is None after plotting using DataFrame.plot()" if legend: if legend_kwargs is None: @@ -104,20 +106,22 @@ def _plot_bar( shift = min(0.05 + max_len_label_str / 100, 0.15) legend_kwargs["bbox_to_anchor"] = (1.0 + shift, 0.5) - handles, legend_labels = axis.get_legend_handles_labels() - _ = axis.figure.legend( + handles, legend_labels = axis_df.get_legend_handles_labels() + figure = axis_df.figure + assert isinstance(figure, Figure), "figure extraction from axes is not a Figure" + _ = figure.legend( handles=handles[::-1], labels=legend_labels[::-1], **legend_kwargs ) # Heuristic to make the partition id on xticks non-overlapping if partition_id_axis == "x": - xticklabels = axis.get_xticklabels() + xticklabels = axis_df.get_xticklabels() if len(xticklabels) > 20: # Make every other xtick label not visible for i, label in enumerate(xticklabels): if i % 2 == 1: label.set_visible(False) - return axis + return axis_df def _initialize_figsize( diff --git a/datasets/flwr_datasets/visualization/comparison_label_distribution.py b/datasets/flwr_datasets/visualization/comparison_label_distribution.py index 17b9a9aec251..c741ddee219e 100644 --- 
a/datasets/flwr_datasets/visualization/comparison_label_distribution.py +++ b/datasets/flwr_datasets/visualization/comparison_label_distribution.py @@ -30,6 +30,7 @@ # pylint: disable=too-many-arguments,too-many-locals +# mypy: disable-error-code="call-overload" def plot_comparison_label_distribution( partitioner_list: list[Partitioner], label_name: Union[str, list[str]], @@ -153,7 +154,11 @@ def plot_comparison_label_distribution( figsize = _initialize_comparison_figsize(figsize, num_partitioners) axes_sharing = _initialize_axis_sharing(size_unit, plot_type, partition_id_axis) fig, axes = plt.subplots( - 1, num_partitioners, layout="constrained", figsize=figsize, **axes_sharing + nrows=1, + ncols=num_partitioners, + figsize=figsize, + layout="constrained", + **axes_sharing, ) if titles is None: diff --git a/datasets/flwr_datasets/visualization/label_distribution.py b/datasets/flwr_datasets/visualization/label_distribution.py index b1183c225b86..550a4ecae725 100644 --- a/datasets/flwr_datasets/visualization/label_distribution.py +++ b/datasets/flwr_datasets/visualization/label_distribution.py @@ -245,5 +245,7 @@ def plot_label_distributions( plot_kwargs, legend_kwargs, ) - assert axis is not None - return axis.figure, axis, dataframe + assert axis is not None, "axis is None after plotting" + figure = axis.figure + assert isinstance(figure, Figure), "figure extraction from axes is not a Figure" + return figure, axis, dataframe diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 73523af2039e..2d699c5e901b 100644 --- a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr-datasets" -version = "0.3.0" +version = "0.4.0" description = "Flower Datasets" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -51,14 +51,13 @@ exclude = [ ] [tool.poetry.dependencies] -python = "^3.8" -numpy = "^1.21.0" -datasets = ">=2.14.6 <2.20.0" +python = "^3.9" +numpy = 
">=1.26.0,<3.0.0" +datasets = ">=2.14.6 <=3.1.0" pillow = { version = ">=6.2.1", optional = true } soundfile = { version = ">=0.12.1", optional = true } librosa = { version = ">=0.10.0.post2", optional = true } tqdm ="^4.66.1" -pyarrow = "==16.1.0" matplotlib = "^3.7.5" seaborn = "^0.13.0" diff --git a/dev/build-docker-image-matrix.py b/dev/build-docker-image-matrix.py index 52c96e3cca7a..9d255ac5471f 100644 --- a/dev/build-docker-image-matrix.py +++ b/dev/build-docker-image-matrix.py @@ -1,13 +1,36 @@ """ -Usage: python dev/build-docker-image-matrix.py --flwr-version +Usage: python dev/build-docker-image-matrix.py --flwr-version + +Images are built in three workflows: stable, nightly, and unstable (main). +Each builds for `amd64` and `arm64`. + +1. **Ubuntu Images**: + - Used for images where dependencies might be installed by users. + - Ubuntu uses `glibc`, compatible with most ML frameworks. + +2. **Alpine Images**: + - Used only for minimal images (e.g., SuperLink) where no extra dependencies are expected. + - Limited use due to dependency (in particular ML frameworks) compilation complexity with `musl`. + +Workflow Details: +- **Stable Release**: Triggered on new releases. Builds full matrix (all Python versions, Ubuntu and Alpine). +- **Nightly Release**: Daily trigger. Builds full matrix (latest Python, Ubuntu only). +- **Unstable**: Triggered on main branch commits. Builds simplified matrix (latest Python, Ubuntu only). """ +import sys import argparse import json -from dataclasses import asdict, dataclass +from dataclasses import asdict, dataclass, field from enum import Enum from typing import Any, Callable, Dict, List, Optional +# when we switch to Python 3.11 in the ci, we need to change the DistroName to: +# class DistroName(StrEnum): +# ALPINE = "alpine" +# UBUNTU = "ubuntu" +assert sys.version_info < (3, 11), "Script requires Python 3.9 or lower." 
+ class DistroName(str, Enum): ALPINE = "alpine" @@ -31,36 +54,91 @@ class Distro: @dataclass -class BaseImage: +class Variant: distro: Distro - python_version: str - namespace_repository: str - file_dir: str - tag: str - flwr_version: str + extras: Optional[Any] = None -def new_base_image( - flwr_version: str, python_version: str, distro: Distro -) -> Dict[str, Any]: - return BaseImage( - distro, - python_version, - "flwr/base", - f"{DOCKERFILE_ROOT}/base/{distro.name.value}", - f"{flwr_version}-py{python_version}-{distro.name.value}{distro.version}", - flwr_version, +@dataclass +class CpuVariant: + pass + + +@dataclass +class CudaVariant: + version: str + + +CUDA_VERSIONS_CONFIG = [ + ("11.2.2", "20.04"), + ("11.8.0", "22.04"), + ("12.1.0", "22.04"), + ("12.3.2", "22.04"), +] +LATEST_SUPPORTED_CUDA_VERSION = Variant( + Distro(DistroName.UBUNTU, "22.04"), + CudaVariant(version="12.4.1"), +) + +# ubuntu base image +UBUNTU_VARIANT = Variant( + Distro(DistroName.UBUNTU, "24.04"), + CpuVariant(), +) + + +# alpine base image +ALPINE_VARIANT = Variant( + Distro(DistroName.ALPINE, "3.19"), + CpuVariant(), +) + + +# ubuntu cuda base images +CUDA_VARIANTS = [ + Variant( + Distro(DistroName.UBUNTU, ubuntu_version), + CudaVariant(version=cuda_version), ) + for (cuda_version, ubuntu_version) in CUDA_VERSIONS_CONFIG +] + [LATEST_SUPPORTED_CUDA_VERSION] -def generate_base_images( - flwr_version: str, python_versions: List[str], distros: List[Dict[str, str]] -) -> List[Dict[str, Any]]: - return [ - new_base_image(flwr_version, python_version, distro) - for distro in distros - for python_version in python_versions - ] +def remove_patch_version(version: str) -> str: + return ".".join(version.split(".")[0:2]) + + +@dataclass +class BaseImageBuilder: + file_dir_fn: Callable[[Any], str] + tags_fn: Callable[[Any], list[str]] + build_args_fn: Callable[[Any], str] + build_args: Any + tags: list[str] = field(init=False) + file_dir: str = field(init=False) + tags_encoded: str = 
field(init=False) + build_args_encoded: str = field(init=False) + + +@dataclass +class BaseImage(BaseImageBuilder): + namespace_repository: str = "flwr/base" + + @property + def file_dir(self) -> str: + return self.file_dir_fn(self.build_args) + + @property + def tags(self) -> str: + return self.tags_fn(self.build_args) + + @property + def tags_encoded(self) -> str: + return "\n".join(self.tags) + + @property + def build_args_encoded(self) -> str: + return self.build_args_fn(self.build_args) @dataclass @@ -68,7 +146,7 @@ class BinaryImage: namespace_repository: str file_dir: str base_image: str - tags: List[str] + tags_encoded: str def new_binary_image( @@ -83,7 +161,7 @@ def new_binary_image( return BinaryImage( f"flwr/{name}", f"{DOCKERFILE_ROOT}/{name}", - base_image.tag, + base_image.tags[0], "\n".join(tags), ) @@ -103,47 +181,95 @@ def generate_binary_images( def tag_latest_alpine_with_flwr_version(image: BaseImage) -> List[str]: if ( - image.distro.name == DistroName.ALPINE - and image.python_version == LATEST_SUPPORTED_PYTHON_VERSION + image.build_args.variant.distro.name == DistroName.ALPINE + and image.build_args.python_version == LATEST_SUPPORTED_PYTHON_VERSION ): - return [image.tag, image.flwr_version] + return image.tags + [image.build_args.flwr_version] else: - return [image.tag] + return image.tags def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: if ( - image.distro.name == DistroName.UBUNTU - and image.python_version == LATEST_SUPPORTED_PYTHON_VERSION + image.build_args.variant.distro.name == DistroName.UBUNTU + and image.build_args.python_version == LATEST_SUPPORTED_PYTHON_VERSION + and isinstance(image.build_args.variant.extras, CpuVariant) ): - return [image.tag, image.flwr_version] + return image.tags + [image.build_args.flwr_version] else: - return [image.tag] + return image.tags + + +# +# Build matrix for stable releases +# +def build_stable_matrix(flwr_version: str) -> List[BaseImage]: + @dataclass + class 
StableBaseImageBuildArgs: + variant: Variant + python_version: str + flwr_version: str + + cpu_build_args = """PYTHON_VERSION={python_version} +FLWR_VERSION={flwr_version} +DISTRO={distro_name} +DISTRO_VERSION={distro_version} +""" + cpu_build_args_variants = [ + StableBaseImageBuildArgs(UBUNTU_VARIANT, python_version, flwr_version) + for python_version in SUPPORTED_PYTHON_VERSIONS + ] + [ + StableBaseImageBuildArgs( + ALPINE_VARIANT, LATEST_SUPPORTED_PYTHON_VERSION, flwr_version + ) + ] -if __name__ == "__main__": - arg_parser = argparse.ArgumentParser( - description="Generate Github Docker workflow matrix" - ) - arg_parser.add_argument("--flwr-version", type=str, required=True) - args = arg_parser.parse_args() + cpu_base_images = [ + BaseImage( + file_dir_fn=lambda args: f"{DOCKERFILE_ROOT}/base/{args.variant.distro.name.value}", + tags_fn=lambda args: [ + f"{args.flwr_version}-py{args.python_version}-{args.variant.distro.name.value}{args.variant.distro.version}" + ], + build_args_fn=lambda args: cpu_build_args.format( + python_version=args.python_version, + flwr_version=args.flwr_version, + distro_name=args.variant.distro.name, + distro_version=args.variant.distro.version, + ), + build_args=build_args_variant, + ) + for build_args_variant in cpu_build_args_variants + ] - flwr_version = args.flwr_version + cuda_build_args_variants = [ + StableBaseImageBuildArgs(variant, python_version, flwr_version) + for variant in CUDA_VARIANTS + for python_version in SUPPORTED_PYTHON_VERSIONS + ] - # ubuntu base images for each supported python version - ubuntu_base_images = generate_base_images( - flwr_version, - SUPPORTED_PYTHON_VERSIONS, - [Distro(DistroName.UBUNTU, "24.04")], - ) - # alpine base images for the latest supported python version - alpine_base_images = generate_base_images( - flwr_version, - [LATEST_SUPPORTED_PYTHON_VERSION], - [Distro(DistroName.ALPINE, "3.19")], - ) + cuda_build_args = cpu_build_args + """CUDA_VERSION={cuda_version}""" + + cuda_base_image = [ 
+ BaseImage( + file_dir_fn=lambda args: f"{DOCKERFILE_ROOT}/base/{args.variant.distro.name.value}-cuda", + tags_fn=lambda args: [ + f"{args.flwr_version}-py{args.python_version}-cu{remove_patch_version(args.variant.extras.version)}-{args.variant.distro.name.value}{args.variant.distro.version}", + ], + build_args_fn=lambda args: cuda_build_args.format( + python_version=args.python_version, + flwr_version=args.flwr_version, + distro_name=args.variant.distro.name, + distro_version=args.variant.distro.version, + cuda_version=args.variant.extras.version, + ), + build_args=build_args_variant, + ) + for build_args_variant in cuda_build_args_variants + ] - base_images = ubuntu_base_images + alpine_base_images + # base_images = cpu_base_images + cuda_base_image + base_images = cpu_base_images binary_images = ( # ubuntu and alpine images for the latest supported python version @@ -151,17 +277,22 @@ def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: "superlink", base_images, tag_latest_alpine_with_flwr_version, - lambda image: image.python_version == LATEST_SUPPORTED_PYTHON_VERSION, + lambda image: image.build_args.python_version + == LATEST_SUPPORTED_PYTHON_VERSION + and isinstance(image.build_args.variant.extras, CpuVariant), ) # ubuntu images for each supported python version + generate_binary_images( "supernode", base_images, tag_latest_alpine_with_flwr_version, - lambda image: image.distro.name == DistroName.UBUNTU + lambda image: ( + image.build_args.variant.distro.name == DistroName.UBUNTU + and isinstance(image.build_args.variant.extras, CpuVariant) + ) or ( - image.distro.name == DistroName.ALPINE - and image.python_version == LATEST_SUPPORTED_PYTHON_VERSION + image.build_args.variant.distro.name == DistroName.ALPINE + and image.build_args.python_version == LATEST_SUPPORTED_PYTHON_VERSION ), ) # ubuntu images for each supported python version @@ -169,28 +300,216 @@ def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: 
"serverapp", base_images, tag_latest_ubuntu_with_flwr_version, - lambda image: image.distro.name == DistroName.UBUNTU, + lambda image: image.build_args.variant.distro.name == DistroName.UBUNTU, ) # ubuntu images for each supported python version + generate_binary_images( - "superexec", + "clientapp", base_images, tag_latest_ubuntu_with_flwr_version, - lambda image: image.distro.name == DistroName.UBUNTU, + lambda image: image.build_args.variant.distro.name == DistroName.UBUNTU, + ) + ) + + return base_images, binary_images + + +# +# Build matrix for unstable releases +# +def build_unstable_matrix(flwr_version_ref: str) -> List[BaseImage]: + @dataclass + class UnstableBaseImageBuildArgs: + variant: Variant + python_version: str + flwr_version_ref: str + + cpu_ubuntu_build_args_variant = UnstableBaseImageBuildArgs( + UBUNTU_VARIANT, LATEST_SUPPORTED_PYTHON_VERSION, flwr_version_ref + ) + + cpu_build_args = """PYTHON_VERSION={python_version} +FLWR_VERSION_REF={flwr_version_ref} +DISTRO={distro_name} +DISTRO_VERSION={distro_version} +""" + + cpu_base_image = BaseImage( + file_dir_fn=lambda args: f"{DOCKERFILE_ROOT}/base/{args.variant.distro.name.value}", + tags_fn=lambda _: ["unstable"], + build_args_fn=lambda args: cpu_build_args.format( + python_version=args.python_version, + flwr_version_ref=args.flwr_version_ref, + distro_name=args.variant.distro.name, + distro_version=args.variant.distro.version, + ), + build_args=cpu_ubuntu_build_args_variant, + ) + + cuda_build_args_variant = UnstableBaseImageBuildArgs( + LATEST_SUPPORTED_CUDA_VERSION, LATEST_SUPPORTED_PYTHON_VERSION, flwr_version_ref + ) + + cuda_build_args = cpu_build_args + """CUDA_VERSION={cuda_version}""" + + cuda_base_image = BaseImage( + file_dir_fn=lambda args: f"{DOCKERFILE_ROOT}/base/{args.variant.distro.name.value}-cuda", + tags_fn=lambda _: ["unstable-cuda"], + build_args_fn=lambda args: cuda_build_args.format( + python_version=args.python_version, + flwr_version_ref=args.flwr_version_ref, + 
distro_name=args.variant.distro.name, + distro_version=args.variant.distro.version, + cuda_version=args.variant.extras.version, + ), + build_args=cuda_build_args_variant, + ) + + # base_images = [cpu_base_image, cuda_base_image] + base_images = [cpu_base_image] + + binary_images = ( + generate_binary_images( + "superlink", + base_images, + lambda image: image.tags, + lambda image: isinstance(image.build_args.variant.extras, CpuVariant), ) - # ubuntu images for each supported python version + generate_binary_images( - "clientapp", + "supernode", base_images, - tag_latest_ubuntu_with_flwr_version, - lambda image: image.distro.name == DistroName.UBUNTU, + lambda image: image.tags, + lambda image: isinstance(image.build_args.variant.extras, CpuVariant), ) + + generate_binary_images("serverapp", base_images, lambda image: image.tags) + + generate_binary_images("clientapp", base_images, lambda image: image.tags) + ) + + return base_images, binary_images + + +# +# Build matrix for nightly releases +# +def build_nightly_matrix(flwr_version: str, flwr_package: str) -> List[BaseImage]: + @dataclass + class NightlyBaseImageBuildArgs: + variant: Variant + python_version: str + flwr_version: str + flwr_package: str + + cpu_ubuntu_build_args_variant = NightlyBaseImageBuildArgs( + UBUNTU_VARIANT, LATEST_SUPPORTED_PYTHON_VERSION, flwr_version, flwr_package ) + cpu_build_args = """PYTHON_VERSION={python_version} +FLWR_VERSION={flwr_version} +FLWR_PACKAGE={flwr_package} +DISTRO={distro_name} +DISTRO_VERSION={distro_version} +""" + + cpu_base_image = BaseImage( + file_dir_fn=lambda args: f"{DOCKERFILE_ROOT}/base/{args.variant.distro.name.value}", + tags_fn=lambda args: [args.flwr_version, "nightly"], + build_args_fn=lambda args: cpu_build_args.format( + python_version=args.python_version, + flwr_version=args.flwr_version, + flwr_package=args.flwr_package, + distro_name=args.variant.distro.name, + distro_version=args.variant.distro.version, + ), + 
build_args=cpu_ubuntu_build_args_variant, + ) + + cuda_build_args_variant = NightlyBaseImageBuildArgs( + LATEST_SUPPORTED_CUDA_VERSION, + LATEST_SUPPORTED_PYTHON_VERSION, + flwr_version, + flwr_package, + ) + + cuda_build_args = cpu_build_args + """CUDA_VERSION={cuda_version}""" + + cuda_base_image = BaseImage( + file_dir_fn=lambda args: f"{DOCKERFILE_ROOT}/base/{args.variant.distro.name.value}-cuda", + tags_fn=lambda args: [f"{args.flwr_version}-cuda", "nightly-cuda"], + build_args_fn=lambda args: cuda_build_args.format( + python_version=args.python_version, + flwr_version=args.flwr_version, + flwr_package=args.flwr_package, + distro_name=args.variant.distro.name, + distro_version=args.variant.distro.version, + cuda_version=args.variant.extras.version, + ), + build_args=cuda_build_args_variant, + ) + + # base_images = [cpu_base_image, cuda_base_image] + base_images = [cpu_base_image] + + binary_images = ( + generate_binary_images( + "superlink", + base_images, + lambda image: image.tags, + lambda image: isinstance(image.build_args.variant.extras, CpuVariant), + ) + + generate_binary_images( + "supernode", + base_images, + lambda image: image.tags, + lambda image: isinstance(image.build_args.variant.extras, CpuVariant), + ) + + generate_binary_images("serverapp", base_images, lambda image: image.tags) + + generate_binary_images("clientapp", base_images, lambda image: image.tags) + ) + + return base_images, binary_images + + +if __name__ == "__main__": + arg_parser = argparse.ArgumentParser( + description="Generate Github Docker workflow matrix" + ) + arg_parser.add_argument("--flwr-version", type=str, required=True) + arg_parser.add_argument("--flwr-package", type=str, default="flwr") + arg_parser.add_argument( + "--matrix", choices=["stable", "nightly", "unstable"], default="stable" + ) + + args = arg_parser.parse_args() + + flwr_version = args.flwr_version + flwr_package = args.flwr_package + matrix = args.matrix + + if matrix == "stable": + base_images, 
binary_images = build_stable_matrix(flwr_version) + elif matrix == "nightly": + base_images, binary_images = build_nightly_matrix(flwr_version, flwr_package) + else: + base_images, binary_images = build_unstable_matrix(flwr_version) + print( json.dumps( { - "base": {"images": list(map(lambda image: asdict(image), base_images))}, + "base": { + "images": list( + map( + lambda image: asdict( + image, + dict_factory=lambda x: { + k: v + for (k, v) in x + if v is not None and callable(v) is False + }, + ), + base_images, + ) + ) + }, "binary": { "images": list(map(lambda image: asdict(image), binary_images)) }, diff --git a/dev/build-example-docs.py b/dev/build-example-docs.py index 772a26272fd7..05656967bbbd 100644 --- a/dev/build-example-docs.py +++ b/dev/build-example-docs.py @@ -28,7 +28,7 @@ ----------------------------- Welcome to Flower Examples' documentation. `Flower `_ is -a friendly federated learning framework. +a friendly federated AI framework. Join the Flower Community ------------------------- diff --git a/dev/prepare-release-changelog.sh b/dev/prepare-release-changelog.sh deleted file mode 100755 index 3f2a2ae325e9..000000000000 --- a/dev/prepare-release-changelog.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -# Get the current date in the format YYYY-MM-DD -current_date=$(date +"%Y-%m-%d") - -tags=$(git tag --sort=-v:refname) -new_version=$1 -old_version=$(echo "$tags" | sed -n '1p') - -shortlog=$(git shortlog "$old_version"..main -s | grep -vEi '(\(|\[)bot(\)|\])' | awk '{name = substr($0, index($0, $2)); printf "%s`%s`", sep, name; sep=", "} END {print ""}') - -token="" -thanks="\n### Thanks to our contributors\n\nWe would like to give our special thanks to all the contributors who made the new version of Flower possible (in \`git shortlog\` order):\n\n$shortlog $token" - -# Check if the token exists in the markdown file -if ! 
grep -q "$token" doc/source/ref-changelog.md; then - # If the token does not exist in the markdown file, append the new content after the version - awk -v version="$new_version" -v date="$current_date" -v text="$thanks" \ - '{ if ($0 ~ "## Unreleased") print "## " version " (" date ")\n" text; else print $0 }' doc/source/ref-changelog.md > temp.md && mv temp.md doc/source/ref-changelog.md -else - # If the token exists, replace the line containing the token with the new shortlog - awk -v token="$token" -v newlog="$shortlog $token" '{ if ($0 ~ token) print newlog; else print $0 }' doc/source/ref-changelog.md > temp.md && mv temp.md doc/source/ref-changelog.md -fi diff --git a/dev/swift-docs-resources/footer.html b/dev/swift-docs-resources/footer.html index 6a3a5492c83b..cb4dcbf44c1c 100644 --- a/dev/swift-docs-resources/footer.html +++ b/dev/swift-docs-resources/footer.html @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/dev/update_changelog.py b/dev/update_changelog.py new file mode 100644 index 000000000000..0b4359d90e13 --- /dev/null +++ b/dev/update_changelog.py @@ -0,0 +1,297 @@ +# mypy: ignore-errors +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Update the changelog using PR titles.""" + + +import pathlib +import re + +try: + import tomllib +except ModuleNotFoundError: + import tomli as tomllib +from datetime import date +from sys import argv +from typing import Optional + +from github import Github +from github.PullRequest import PullRequest +from github.Repository import Repository +from github.Tag import Tag + +REPO_NAME = "adap/flower" +CHANGELOG_FILE = "doc/source/ref-changelog.md" +CHANGELOG_SECTION_HEADER = "### Changelog entry" + +# Load the TOML configuration +with (pathlib.Path(__file__).parent.resolve() / "changelog_config.toml").open( + "rb" +) as file: + CONFIG = tomllib.load(file) + +# Extract types, project, and scope from the config +TYPES = "|".join(CONFIG["type"]) +PROJECTS = "|".join(CONFIG["project"]) + "|\\*" +SCOPE = CONFIG["scope"] +ALLOWED_VERBS = CONFIG["allowed_verbs"] + +# Construct the pattern +PATTERN_TEMPLATE = CONFIG["pattern_template"] +PATTERN = PATTERN_TEMPLATE.format(types=TYPES, projects=PROJECTS, scope=SCOPE) + + +def _get_latest_tag(gh_api: Github) -> tuple[Repository, Optional[Tag]]: + """Retrieve the latest tag from the GitHub repository.""" + repo = gh_api.get_repo(REPO_NAME) + tags = repo.get_tags() + return repo, tags[0] if tags.totalCount > 0 else None + + +def _add_shortlog(new_version: str, shortlog: str) -> None: + """Update the markdown file with the new version or update existing logs.""" + token = f"" + entry = ( + "\n### Thanks to our contributors\n\n" + "We would like to give our special thanks to all the contributors " + "who made the new version of Flower possible " + f"(in `git shortlog` order):\n\n{shortlog} {token}" + ) + current_date = date.today() + + with open(CHANGELOG_FILE, encoding="utf-8") as file: + content = file.readlines() + + token_exists = any(token in line for line in content) + + with open(CHANGELOG_FILE, "w", encoding="utf-8") as file: + for line in 
content: + if token in line: + token_exists = True + file.write(line) + elif "## Unreleased" in line and not token_exists: + # Add the new entry under "## Unreleased" + file.write(f"## {new_version} ({current_date})\n{entry}\n") + token_exists = True + else: + file.write(line) + + +def _get_pull_requests_since_tag( + repo: Repository, tag: Tag +) -> tuple[str, set[PullRequest]]: + """Get a list of pull requests merged into the main branch since a given tag.""" + commit_shas = set() + contributors = set() + prs = set() + + for commit in repo.compare(tag.commit.sha, "main").commits: + commit_shas.add(commit.sha) + if commit.author.name is None: + continue + if "[bot]" in commit.author.name: + continue + contributors.add(commit.author.name) + + for pr_info in repo.get_pulls( + state="closed", sort="created", direction="desc", base="main" + ): + if pr_info.merge_commit_sha in commit_shas: + prs.add(pr_info) + if len(prs) == len(commit_shas): + break + + shortlog = ", ".join([f"`{name}`" for name in sorted(contributors)]) + return shortlog, prs + + +def _format_pr_reference(title: str, number: int, url: str) -> str: + """Format a pull request reference as a markdown list item.""" + parts = title.strip().replace("*", "").split("`") + formatted_parts = [] + + for i, part in enumerate(parts): + if i % 2 == 0: + # Even index parts are normal text, ensure we do not add extra bold if empty + if part.strip(): + formatted_parts.append(f"**{part.strip()}**") + else: + formatted_parts.append("") + else: + # Odd index parts are inline code + formatted_parts.append(f"`{part.strip()}`") + + # Join parts with spaces but avoid extra spaces + formatted_title = " ".join(filter(None, formatted_parts)) + return f"- {formatted_title} ([#{number}]({url}))" + + +def _extract_changelog_entry( + pr_info: PullRequest, +) -> dict[str, str]: + """Extract the changelog entry from a pull request's body.""" + # Use regex search to find matches + match = re.search(PATTERN, pr_info.title) + if match: 
+ # Extract components from the regex groups + pr_type = match.group(1) + pr_project = match.group(2) + pr_scope = match.group(3) # Correctly capture optional sub-scope + pr_subject = match.group( + 4 + ) # Capture subject starting with uppercase and no terminal period + return { + "type": pr_type, + "project": pr_project, + "scope": pr_scope, + "subject": pr_subject, + } + + return { + "type": "unknown", + "project": "unknown", + "scope": "unknown", + "subject": "unknown", + } + + +def _update_changelog(prs: set[PullRequest]) -> bool: + """Update the changelog file with entries from provided pull requests.""" + breaking_changes = False + unknown_changes = False + + with open(CHANGELOG_FILE, "r+", encoding="utf-8") as file: + content = file.read() + unreleased_index = content.find("## Unreleased") + + if unreleased_index == -1: + print("Unreleased header not found in the changelog.") + return False + + # Find the end of the Unreleased section + next_header_index = content.find("## ", unreleased_index + 1) + next_header_index = ( + next_header_index if next_header_index != -1 else len(content) + ) + + for pr_info in prs: + parsed_title = _extract_changelog_entry(pr_info) + + # Skip if PR should be skipped or already in changelog + if ( + parsed_title.get("scope", "unknown") == "skip" + or f"#{pr_info.number}]" in content + ): + continue + + pr_type = parsed_title.get("type", "unknown") + if pr_type == "feat": + insert_content_index = content.find("### What", unreleased_index + 1) + elif pr_type == "docs": + insert_content_index = content.find( + "### Documentation improvements", unreleased_index + 1 + ) + elif pr_type == "break": + breaking_changes = True + insert_content_index = content.find( + "### Incompatible changes", unreleased_index + 1 + ) + elif pr_type in {"ci", "fix", "refactor"}: + insert_content_index = content.find( + "### Other changes", unreleased_index + 1 + ) + else: + unknown_changes = True + insert_content_index = unreleased_index + + 
pr_reference = _format_pr_reference( + pr_info.title, pr_info.number, pr_info.html_url + ) + + content = _insert_entry_no_desc( + content, + pr_reference, + insert_content_index, + ) + + next_header_index = content.find("## ", unreleased_index + 1) + next_header_index = ( + next_header_index if next_header_index != -1 else len(content) + ) + + if unknown_changes: + content = _insert_entry_no_desc( + content, + "### Unknown changes", + unreleased_index, + ) + + if not breaking_changes: + content = _insert_entry_no_desc( + content, + "None", + content.find("### Incompatible changes", unreleased_index + 1), + ) + + # Finalize content update + file.seek(0) + file.write(content) + file.truncate() + return True + + +def _insert_entry_no_desc( + content: str, pr_reference: str, unreleased_index: int +) -> str: + """Insert a changelog entry for a pull request with no specific description.""" + insert_index = content.find("\n", unreleased_index) + 1 + content = ( + content[:insert_index] + "\n" + pr_reference + "\n" + content[insert_index:] + ) + return content + + +def _bump_minor_version(tag: Tag) -> Optional[str]: + """Bump the minor version of the tag.""" + match = re.match(r"v(\d+)\.(\d+)\.(\d+)", tag.name) + if match is None: + return None + major, minor, _ = [int(x) for x in match.groups()] + # Increment the minor version and reset patch version + new_version = f"v{major}.{minor + 1}.0" + return new_version + + +def main() -> None: + """Update changelog using the descriptions of PRs since the latest tag.""" + # Initialize GitHub Client with provided token (as argument) + gh_api = Github(argv[1]) + repo, latest_tag = _get_latest_tag(gh_api) + if not latest_tag: + print("No tags found in the repository.") + return + + shortlog, prs = _get_pull_requests_since_tag(repo, latest_tag) + if _update_changelog(prs): + new_version = _bump_minor_version(latest_tag) + if not new_version: + print("Wrong tag format.") + return + _add_shortlog(new_version, shortlog) + 
print("Changelog updated succesfully.") + + +if __name__ == "__main__": + main() diff --git a/dev/update_version.py b/dev/update_version.py index cbb4d8e138c2..0b2db3369a3d 100644 --- a/dev/update_version.py +++ b/dev/update_version.py @@ -13,9 +13,6 @@ "src/py/flwr/cli/new/templates/app/pyproject.*.toml.tpl": [ "flwr[simulation]>={version}", ], - "src/docker/complete/compose.yml": ["FLWR_VERSION:-{version}"], - "src/docker/distributed/client/compose.yml": ["FLWR_VERSION:-{version}"], - "src/docker/distributed/server/compose.yml": ["FLWR_VERSION:-{version}"], } REPLACE_NEXT_VERSION = { @@ -25,11 +22,13 @@ ], "examples/doc/source/conf.py": ['release = "{version}"'], "baselines/doc/source/conf.py": ['release = "{version}"'], + "src/docker/complete/compose.yml": ["FLWR_VERSION:-{version}"], + "src/docker/distributed/client/compose.yml": ["FLWR_VERSION:-{version}"], + "src/docker/distributed/server/compose.yml": ["FLWR_VERSION:-{version}"], } EXAMPLES = { "examples/*/pyproject.toml": [ - "flwr[simulation]=={version}", "flwr[simulation]>={version}", ], } @@ -103,7 +102,9 @@ def _update_versions(file_patterns, replace_strings, new_version, check): "--check", action="store_true", help="Fails if any file would be modified." ) parser.add_argument( - "--examples", action="store_true", help="Also modify flwr version in examples." 
+ "--no_examples", + action="store_true", + help="Also modify flwr version in examples.", ) group = parser.add_mutually_exclusive_group() @@ -141,7 +142,7 @@ def _update_versions(file_patterns, replace_strings, new_version, check): if not _update_versions([file_pattern], strings, curr_version, args.check): wrong = True - if args.examples: + if not args.no_examples: for file_pattern, strings in EXAMPLES.items(): if not _update_versions([file_pattern], strings, curr_version, args.check): wrong = True diff --git a/doc/build-versioned-docs.sh b/doc/build-versioned-docs.sh index 772250865143..db7e766b4f83 100755 --- a/doc/build-versioned-docs.sh +++ b/doc/build-versioned-docs.sh @@ -22,11 +22,29 @@ languages="en `find locales/ -mindepth 1 -maxdepth 1 -type d -exec basename '{}' # Get a list of tags, excluding those before v1.0.0 versions="`git for-each-ref '--format=%(refname:lstrip=-1)' refs/tags/ | grep -iE '^v((([1-9]|[0-9]{2,}).*\.([8-9]|[0-9]{2,}).*)|([2-9]|[0-9]{2,}).*)$'`" +# Set the numpy version to use for v1.8.0 to v1.12.0 +numpy_version_1="1.26.4" +numpy_version_2=$(python -c "import numpy; print(numpy.__version__)") + for current_version in ${versions}; do # Make the current language available to conf.py export current_version git checkout --force ${current_version} + + # Downgrade numpy for versions between v1.8.0 and v1.12.0 to avoid conflicts in docs + if [ "$current_version" = "v1.8.0" ] || \ + [ "$current_version" = "v1.9.0" ] || \ + [ "$current_version" = "v1.10.0" ] || \ + [ "$current_version" = "v1.11.0" ] || \ + [ "$current_version" = "v1.11.1" ] || \ + [ "$current_version" = "v1.12.0" ]; then + echo "INFO: Using numpy version ${numpy_version_1} for ${current_version} docs" + pip install "numpy==${numpy_version_1}" + else + echo "INFO: Using numpy version ${numpy_version_2} for ${current_version} docs" + pip install "numpy==${numpy_version_2}" + fi echo "INFO: Building sites for ${current_version}" for current_language in ${languages}; do diff --git 
a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index a11f44f6bd59..40aaec1a1087 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-10-10 00:29+0000\n" +"POT-Creation-Date: 2024-11-30 00:31+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -962,9 +962,9 @@ msgstr "" #: ../../source/contributor-how-to-release-flower.rst:13 msgid "" -"Run ``python3 src/py/flwr_tool/update_changelog.py `` in " -"order to add every new change to the changelog (feel free to make manual " -"changes to the changelog afterwards until it looks good)." +"Run ``python3 ./dev/update_changelog.py `` in order to add" +" every new change to the changelog (feel free to make manual changes to " +"the changelog afterwards until it looks good)." msgstr "" #: ../../source/contributor-how-to-release-flower.rst:16 @@ -1353,10 +1353,10 @@ msgid "Where to start" msgstr "Par où commencer" #: ../../source/contributor-ref-good-first-contributions.rst:11 +#, fuzzy msgid "" -"Until the Flower core library matures it will be easier to get PR's " -"accepted if they only touch non-core areas of the codebase. Good " -"candidates to get started are:" +"In general, it is easier to get PR's accepted if they only touch non-core" +" areas of the codebase. Good candidates to get started are:" msgstr "" "Jusqu'à ce que la bibliothèque centrale de Flower arrive à maturité, il " "sera plus facile de faire accepter les RP s'ils ne touchent que des zones" @@ -1370,36 +1370,44 @@ msgstr "" "exprimé plus clairement ?" #: ../../source/contributor-ref-good-first-contributions.rst:15 +#, python-format +msgid "" +"Open issues: Issues with the tag `good first issue " +"`_." 
+msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:17 msgid "Baselines: See below." msgstr "Références : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:16 +#: ../../source/contributor-ref-good-first-contributions.rst:18 msgid "Examples: See below." msgstr "Exemples : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:19 -msgid "Request for Flower Baselines" +#: ../../source/contributor-ref-good-first-contributions.rst:21 +#, fuzzy +msgid "Flower Baselines" msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/contributor-ref-good-first-contributions.rst:21 +#: ../../source/contributor-ref-good-first-contributions.rst:23 #, fuzzy msgid "" -"If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines " -"`_." +"If you are not familiar with Flower Baselines, please check our " +"`contributing guide for baselines `_." msgstr "" "Si tu n'es pas familier avec les Flower Baselines, tu devrais " "probablement consulter notre `guide de contribution pour les baselines " "`_." -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:26 #, fuzzy msgid "" -"You should then check out the open `issues " +"Then take a look at the open `issues " "`_" -" for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignees, feel free to assign it to yourself and start " -"working on it!" +" for baseline requests. If you find a baseline that you'd like to work " +"on, and it has no assignees, feel free to assign it to yourself and get " +"started!" msgstr "" "Tu devrais ensuite consulter les `issues ouvertes " "`_" @@ -1407,93 +1415,93 @@ msgstr "" " laquelle tu aimerais travailler et qui n'a pas d'assignés, n'hésite pas " "à te l'attribuer et à commencer à travailler dessus !" 
-#: ../../source/contributor-ref-good-first-contributions.rst:30 +#: ../../source/contributor-ref-good-first-contributions.rst:31 +#, fuzzy msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to" -" open a new issue with the baseline request template!" +"If you don't find the baseline you'd like to work on, be sure to open a " +"new issue with the baseline request template!" msgstr "" "Sinon, si tu ne trouves pas de ligne de base sur laquelle tu aimerais " "travailler, n'oublie pas d'ouvrir un nouveau problème à l'aide du modèle " "de demande de ligne de base !" -#: ../../source/contributor-ref-good-first-contributions.rst:34 -msgid "Request for examples" -msgstr "Demande pour un nouveau Flower Example" +#: ../../source/contributor-ref-good-first-contributions.rst:35 +#, fuzzy +msgid "Usage examples" +msgstr "Exemples de PyTorch" -#: ../../source/contributor-ref-good-first-contributions.rst:36 +#: ../../source/contributor-ref-good-first-contributions.rst:37 +#, fuzzy msgid "" -"We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are" -" a few ideas where we'd be happy to accept a PR:" +"We wish we had more time to write usage examples because they help users " +"to get started with building what they want. If you notice any missing " +"examples that could help others, feel free to contribute!" msgstr "" "Nous aimerions avoir plus de temps pour écrire des exemples d'utilisation" " car nous pensons qu'ils aident les utilisateurs à commencer à construire" " ce qu'ils veulent construire. 
Voici quelques idées pour lesquelles nous " "serions heureux d'accepter un RP :" -#: ../../source/contributor-ref-good-first-contributions.rst:40 -msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -msgstr "LLaMA 2 fine-tuning avec Hugging Face et PyTorch" - -#: ../../source/contributor-ref-good-first-contributions.rst:41 -msgid "XGBoost" -msgstr "XGBoost" - -#: ../../source/contributor-ref-good-first-contributions.rst:42 -msgid "Android ONNX on-device training" -msgstr "Training sur téléphone à l'aide d'Android ONNX" - #: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 msgid "Secure Aggregation Protocols" msgstr "Protocoles d'agrégation sécurisés" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:6 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " -"protocol has not been implemented yet, so its diagram and abstraction may" -" not be accurate in practice. The SecAgg protocol can be considered as a " -"special case of the SecAgg+ protocol." +"While this term might be used in other places, here it refers to a series" +" of protocols, including ``SecAgg``, ``SecAgg+``, ``LightSecAgg``, " +"``FastSecAgg``, etc. This concept was first proposed by Bonawitz et al. " +"in `Practical Secure Aggregation for Federated Learning on User-Held Data" +" `_." msgstr "" -"Inclut les protocoles SecAgg, SecAgg+ et LightSecAgg. Le protocole " -"LightSecAgg n'a pas encore été mis en œuvre, de sorte que son diagramme " -"et son abstraction peuvent ne pas être exacts dans la pratique. Le " -"protocole SecAgg peut être considéré comme un cas particulier du " -"protocole SecAgg+." 
- -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 -#, fuzzy -msgid "The ``SecAgg+`` abstraction" -msgstr "L'abstraction :code:`SecAgg+`" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" -"In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have" -" keys of int type rather than ClientProxy type." +"Secure Aggregation protocols are used to securely aggregate model updates" +" from multiple clients while keeping the updates private. This is done by" +" encrypting the model updates before sending them to the server. The " +"server can decrypt only the aggregated model update without being able to" +" inspect individual updates." msgstr "" -"Dans cette implémentation, chaque client se verra attribuer un index " -"unique (int) pour une agrégation sécurisée, et donc de nombreux " -"dictionnaires python utilisés ont des clés de type int plutôt que de type" -" ClientProxy." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:16 msgid "" -"The Flower server will execute and process received results in the " -"following order:" +"Flower now provides the ``SecAgg`` and ``SecAgg+`` protocols. While we " +"plan to implement more protocols in the future, one may also implement " +"their own custom secure aggregation protocol via low-level APIs." 
msgstr "" -"Le serveur Flower exécutera et traitera les résultats reçus dans l'ordre " -"suivant :" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 -#, fuzzy -msgid "The ``LightSecAgg`` abstraction" -msgstr "L'abstraction :code:`LightSecAgg`" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:21 +msgid "The ``SecAgg+`` protocol in Flower" +msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 -msgid "Types" -msgstr "Types" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:23 +msgid "" +"The ``SecAgg+`` protocol is implemented using the ``SecAggPlusWorkflow`` " +"in the ``ServerApp`` and the ``secaggplus_mod`` in the ``ClientApp``. The" +" ``SecAgg`` protocol is a special case of the ``SecAgg+`` protocol, and " +"one may use ``SecAggWorkflow`` and ``secagg_mod`` for that." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:28 +msgid "" +"You may find a detailed example in the `Secure Aggregation Example " +"`_. The " +"documentation for the ``SecAgg+`` protocol configuration is available at " +"`SecAggPlusWorkflow `_." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:33 +msgid "" +"The logic of the ``SecAgg+`` protocol is illustrated in the following " +"sequence diagram: the dashed lines represent communication over the " +"network, and the solid lines represent communication within the same " +"process. The ``ServerApp`` is connected to ``SuperLink``, and the " +"``ClientApp`` is connected to the ``SuperNode``; thus, the communication " +"between the ``ServerApp`` and the ``ClientApp`` is done via the " +"``SuperLink`` and the ``SuperNode``." +msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:2 msgid "Contribute on GitHub" @@ -2265,7 +2273,6 @@ msgstr "" "particulièrement regarder les contributions :code:`baselines`." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:357 -#: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "Annexe" @@ -2345,7 +2352,6 @@ msgid "Get started as a contributor" msgstr "Devenez un·e contributeur·ice" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/docker/run-as-subprocess.rst:11 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 @@ -2627,17 +2633,11 @@ msgstr "Collecte centralisée des données" #: ../../source/docker/enable-tls.rst:4 msgid "" "When operating in a production environment, it is strongly recommended to" -" enable Transport Layer Security (TLS) for each Flower Component to " +" enable Transport Layer Security (TLS) for each Flower component to " "ensure secure communication." msgstr "" -#: ../../source/docker/enable-tls.rst:7 -msgid "" -"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "" - -#: ../../source/docker/enable-tls.rst:12 +#: ../../source/docker/enable-tls.rst:9 msgid "" "For testing purposes, you can generate your own self-signed certificates." " The `Enable SSL connections ``: The name of your SuperLink image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2802,19 +2818,12 @@ msgstr "" msgid "the network." msgstr "" -#: ../../source/docker/enable-tls.rst:72 +#: ../../source/docker/enable-tls.rst:79 #, fuzzy -msgid "SuperNode" +msgid "**SuperNode**" msgstr "flower-superlink" -#: ../../source/docker/enable-tls.rst:74 -msgid "" -"Assuming that the ``ca.crt`` certificate already exists locally, we can " -"use the flag ``--volume`` to mount the local certificate into the " -"container's ``/app/`` directory." 
-msgstr "" - -#: ../../source/docker/enable-tls.rst:79 +#: ../../source/docker/enable-tls.rst:83 ../../source/docker/enable-tls.rst:189 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2822,24 +2831,24 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt``" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"current working directory of the host machine as a read-only volume at " -"the ``/app/ca.crt``" +"file from the ``superlink-certificates`` directory of the host machine as" +" a read-only" msgstr "" #: ../../source/docker/enable-tls.rst #, fuzzy -msgid "directory inside the container." +msgid "volume at the ``/app/ca.crt`` directory inside the container." msgstr "Utiliser les conteneurs VS Code Remote" -#: ../../source/docker/enable-tls.rst -msgid "" -":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " -"the image to be run and the specific" +#: ../../source/docker/enable-tls.rst:101 +msgid "````: The name of your SuperNode image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2852,60 +2861,193 @@ msgstr "" msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -#: ../../source/docker/enable-tls.rst:105 -msgid "SuperExec" +#: ../../source/docker/enable-tls.rst +msgid "Isolation Mode ``process``" +msgstr "" + +#: ../../source/docker/enable-tls.rst:109 +msgid "" +"In isolation mode ``process``, the ServerApp and ClientApp run in their " +"own processes. Unlike in isolation mode ``subprocess``, the SuperLink or " +"SuperNode does not attempt to create the respective processes; instead, " +"they must be created externally." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:113 +msgid "" +"It is possible to run only the SuperLink in isolation mode ``subprocess``" +" and the SuperNode in isolation mode ``process``, or vice versa, or even " +"both with isolation mode ``process``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:117 +msgid "**SuperLink and ServerApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:122 +msgid "" +"Assuming all files we need are in the local ``superlink-certificates`` " +"directory, we can use the flag ``--volume`` to mount the local directory " +"into the SuperLink container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory in the current working directory of " +"the host" msgstr "" -#: ../../source/docker/enable-tls.rst:107 +#: ../../source/docker/enable-tls.rst msgid "" -"Assuming all files we need are in the local ``certificates`` directory " -"where the SuperExec will be executed from, we can use the flag " -"``--volume`` to mount the local directory into the ``/app/certificates/``" -" directory of the container:" +"machine as a read-only volume at the ``/app/certificates`` directory " +"inside the container." msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " "the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "SuperExec." +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." 
msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-certfile certificates/server.pem``: Specify the location of the " -"SuperExec's" +"``--isolation process``: Tells the SuperLink that the ServerApp is " +"created by separate" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "independent process. The SuperLink does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:168 +#: ../../source/docker/tutorial-quickstart-docker.rst:207 +#, fuzzy +msgid "Start the ServerApp container:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" + +#: ../../source/docker/enable-tls.rst:181 +msgid "````: The name of your ServerApp image to be run." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"The ``certificates/server.pem`` file is used to identify the SuperExec " -"and to encrypt the" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-keyfile certificates/server.key``: Specify the location of the " -"SuperExec's" +"unencrypted communication. Secure connections will be added in future " +"releases." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:185 +msgid "**SuperNode and ClientApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:192 +#, fuzzy +msgid "Start the SuperNode container:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt`` file from the" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"``--executor-config root-" -"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +"``superlink-certificates`` directory of the host machine as a read-only " +"volume at the ``/app/ca.crt``" msgstr "" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "Utiliser les conteneurs VS Code Remote" + #: ../../source/docker/enable-tls.rst msgid "" -"location of the CA certificate file inside the container that the " -"SuperExec executor" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "should use to verify the SuperLink's identity." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" +msgstr "" + +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:220 +#, fuzzy +msgid "Start the ClientApp container:" +msgstr "Utilisation du moteur du client virtuel" + +#: ../../source/docker/enable-tls.rst:233 +msgid "````: The name of your ClientApp image to be run." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:237 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:54 +#, fuzzy +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/docker/enable-tls.rst:239 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker.rst:330 +msgid "pyproject.toml" +msgstr "" + +#: ../../source/docker/enable-tls.rst:246 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "" +"The path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/enable-tls.rst:251 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/enable-tls.rst:254 +msgid "" +"In this example, ``local-deployment-tls`` has been used. Just remember to" +" replace ``local-deployment-tls`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" #: ../../source/docker/index.rst:2 @@ -2969,6 +3111,13 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." 
+msgstr "" + #: ../../source/docker/persist-superlink-state.rst:21 msgid "" "In the example below, we create a new directory called ``state``, change " @@ -3067,45 +3216,131 @@ msgstr "Démarrer le serveur" #: ../../source/docker/run-as-subprocess.rst:2 #, fuzzy -msgid "Run ClientApp as a Subprocess" +msgid "Run ServerApp or ClientApp as a Subprocess" msgstr "Vérifier le format et tester le code" #: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"In this mode, the ClientApp is executed as a subprocess within the " -"SuperNode Docker container, rather than running in a separate container. " -"This approach reduces the number of running containers, which can be " -"beneficial for environments with limited resources. However, it also " -"means that the ClientApp is no longer isolated from the SuperNode, which " -"may introduce additional security concerns." +"The SuperLink and SuperNode components support two distinct isolation " +"modes, allowing for flexible deployment and control:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:13 +#: ../../source/docker/run-as-subprocess.rst:7 msgid "" -"Before running the ClientApp as a subprocess, ensure that the FAB " -"dependencies have been installed in the SuperNode images. This can be " -"done by extending the SuperNode image:" +"Subprocess Mode: In this configuration (default), the SuperLink and " +"SuperNode take responsibility for launching the ServerApp and ClientApp " +"processes internally. This differs from the ``process`` isolation-mode " +"which uses separate containers, as demonstrated in the :doc:`tutorial-" +"quickstart-docker` guide." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:12 +msgid "" +"Using the ``subprocess`` approach reduces the number of running " +"containers, which can be beneficial for environments with limited " +"resources. 
However, it also means that the applications are not isolated " +"from their parent containers, which may introduce additional security " +"concerns." msgstr "" #: ../../source/docker/run-as-subprocess.rst:17 +msgid "" +"Process Mode: In this mode, the ServerApp and ClientApps run in " +"completely separate processes. Unlike the alternative Subprocess mode, " +"the SuperLink or SuperNode does not attempt to create or manage these " +"processes. Instead, they must be started externally." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:22 +msgid "" +"Both modes can be mixed for added flexibility. For instance, you can run " +"the SuperLink in ``subprocess`` mode while keeping the SuperNode in " +"``process`` mode, or vice versa." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:25 +msgid "" +"To run the SuperLink and SuperNode in isolation mode ``process``, refer " +"to the :doc:`tutorial-quickstart-docker` guide. To run them in " +"``subprocess`` mode, follow the instructions below." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 #, fuzzy -msgid "Dockerfile.supernode" -msgstr "Serveur de Flower" +msgid "ServerApp" +msgstr "serveur" + +#: ../../source/docker/run-as-subprocess.rst:33 +#: ../../source/docker/run-as-subprocess.rst:74 +#, fuzzy +msgid "**Prerequisites**" +msgstr "Prérequis" + +#: ../../source/docker/run-as-subprocess.rst:35 +msgid "" +"1. Before running the ServerApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperLink images. This can be " +"done by extending the SuperLink image:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:38 +#, fuzzy +msgid "superlink.Dockerfile" +msgstr "Démarrer le serveur" + +#: ../../source/docker/run-as-subprocess.rst:52 +msgid "" +"2. 
Next, build the SuperLink Docker image by running the following " +"command in the directory where Dockerfile is located:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:59 +#, fuzzy +msgid "**Run the ServerApp as a Subprocess**" +msgstr "Vérifier le format et tester le code" -#: ../../source/docker/run-as-subprocess.rst:31 +#: ../../source/docker/run-as-subprocess.rst:61 msgid "" -"Next, build the SuperNode Docker image by running the following command " -"in the directory where Dockerfile is located:" +"Start the SuperLink and run the ServerApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#, fuzzy +msgid "ClientApp" +msgstr "client" + +#: ../../source/docker/run-as-subprocess.rst:76 +msgid "" +"1. Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:39 -msgid "Run the ClientApp as a Subprocess" +#: ../../source/docker/run-as-subprocess.rst:80 +#, fuzzy +msgid "supernode.Dockerfile" +msgstr "Démarrer le serveur" + +#: ../../source/docker/run-as-subprocess.rst:94 +msgid "" +"2. 
Next, build the SuperNode Docker image by running the following " +"command in the directory where Dockerfile is located:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:41 +#: ../../source/docker/run-as-subprocess.rst:101 +#, fuzzy +msgid "**Run the ClientApp as a Subprocess**" +msgstr "Vérifier le format et tester le code" + +#: ../../source/docker/run-as-subprocess.rst:103 msgid "" -"Start the SuperNode with the flag ``--isolation subprocess``, which tells" -" the SuperNode to execute the ClientApp as a subprocess:" +"Start the SuperNode and run the ClientApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 @@ -3151,7 +3386,9 @@ msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 -msgid "Docker Compose is `installed `_." +msgid "" +"Docker Compose V2 is `installed " +"`_." msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 @@ -3172,31 +3409,14 @@ msgid "" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:45 #, fuzzy -msgid "Build and start the services using the following command:" -msgstr "Active la virtualenv en exécutant la commande suivante :" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 -#, fuzzy -msgid "" -"Append the following lines to the end of the ``pyproject.toml`` file and " -"save it:" -msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." 
- -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 -#: ../../source/docker/tutorial-quickstart-docker.rst:324 -msgid "pyproject.toml" -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"You can customize the string that follows ``tool.flwr.federations.`` to " -"fit your needs. However, please note that the string cannot contain a dot" -" (``.``)." -msgstr "" +"Export the version of Flower that your environment uses. Then, build and " +"start the services using the following command:" +msgstr "Active la virtualenv en exécutant la commande suivante :" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -3204,77 +3424,78 @@ msgid "" "command." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 -#, fuzzy -msgid "Run the example:" -msgstr "Fédérer l'exemple" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 -msgid "Follow the logs of the SuperExec service:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:72 +msgid "Run the example and follow the logs of the ``ServerApp`` :" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:78 msgid "" "That is all it takes! You can monitor the progress of the run through the" -" logs of the SuperExec." +" logs of the ``ServerApp``." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:91 msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:94 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 #, fuzzy msgid "Limitations" msgstr "Simulation de moniteur" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 #, fuzzy msgid "Quickstart Example" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy msgid "quickstart-fastai" msgstr "Démarrage rapide fastai" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 #: 
../../source/docker/run-quickstart-examples-docker-compose.rst:121 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/ref-changelog.md:236 ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:879 ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:1001 ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1132 msgid "None" msgstr "Aucun" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy msgid "quickstart-huggingface" msgstr "Quickstart tutorials" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy msgid "quickstart-jax" msgstr "Démarrage rapide de JAX" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "Démarrage rapide de JAX" + #: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 #, fuzzy msgid "" "The example has not yet been updated to work with the latest ``flwr`` " @@ -3285,65 +3506,56 @@ msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy -msgid "quickstart-mlcube" -msgstr "Démarrage rapide de JAX" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 -#, fuzzy msgid "quickstart-mlx" msgstr "Démarrage rapide de JAX" 
-#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 msgid "" "`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 #, fuzzy msgid "quickstart-monai" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy msgid "quickstart-pandas" msgstr "Démarrage rapide des Pandas" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 #, fuzzy msgid "quickstart-pytorch-lightning" msgstr "Démarrage rapide de PyTorch Lightning" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy msgid "quickstart-pytorch" msgstr "Démarrage rapide de PyTorch" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy msgid "quickstart-sklearn-tabular" msgstr "Démarrage rapide de scikit-learn" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 #, fuzzy msgid "quickstart-tabnet" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy msgid "quickstart-tensorflow" msgstr "Démarrage rapide de TensorFlow" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 -msgid "Only runs on AMD64." -msgstr "" - #: ../../source/docker/set-environment-variables.rst:2 #, fuzzy msgid "Set Environment Variables" @@ -3372,8 +3584,8 @@ msgid "" "You will learn how to run the Flower client and server components on two " "separate machines, with Flower configured to use TLS encryption and " "persist SuperLink state across restarts. A server consists of a SuperLink" -" and ``SuperExec``. For more details about the Flower architecture, refer" -" to the :doc:`../explanation-flower-architecture` explainer page." +" and a ``ServerApp``. For more details about the Flower architecture, " +"refer to the :doc:`../explanation-flower-architecture` explainer page." 
msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 @@ -3428,134 +3640,144 @@ msgstr "" msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:47 msgid "" "Use the ``certs.yml`` Compose file to generate your own self-signed " "certificates. If you have certificates, you can continue with Step 2." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:212 msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:54 msgid "" "For production environments, you may have to use dedicated services to " "obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:57 msgid "" -"First, set the environment variables ``SUPERLINK_IP`` and " -"``SUPEREXEC_IP`` with the IP address from the remote machine. For " -"example, if the IP is ``192.168.2.33``, execute:" +"First, set the environment variable ``SUPERLINK_IP`` with the IP address " +"from the remote machine. 
For example, if the IP is ``192.168.2.33``, " +"execute:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:64 msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:71 msgid "Step 2: Copy the Server Compose Files" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:73 msgid "" "Use the method that works best for you to copy the ``server`` directory, " -"the certificates, and your Flower project to the remote machine." +"the certificates, and the ``pyproject.toml`` file of your Flower project " +"to the remote machine." msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:86 #, fuzzy msgid "Step 3: Start the Flower Server Components" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:88 msgid "" "Log into the remote machine using ``ssh`` and run the following command " -"to start the SuperLink and SuperExec services:" +"to start the SuperLink and ``ServerApp`` services:" msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``server`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the server ``compose.yml`` file." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:107 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions. After exporting the ``PROJECT_DIR`` (after line " +"4), run the following commands:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:116 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:165 +msgid "" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:118 msgid "Go back to your terminal on your local machine." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:121 #, fuzzy msgid "Step 4: Start the Flower Client Components" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:123 msgid "" "On your local machine, run the following command to start the client " "components:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:133 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``client`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the client ``compose.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:137 #, fuzzy msgid "Step 5: Run Your Flower Project" msgstr "Serveur de Flower" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"Specify the remote SuperExec IP addresses and the path to the root " -"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " -"the ``pyproject.toml`` file. Here, we have named our remote federation " -"``remote-superexec``:" +"Specify the remote SuperLink IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-deployment]`` table in" +" the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-deployment``:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:143 #, fuzzy msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "Démarrage rapide de scikit-learn" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 -msgid "" -"The Path of the ``root-certificates`` should be relative to the location " -"of the ``pyproject.toml`` file." -msgstr "" - -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 -msgid "To run the project, execute:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:155 +msgid "Run the project and follow the ``ServerApp`` logs:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 msgid "" "That's it! With these steps, you've set up Flower on two separate " "machines and are ready to start using it." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:165 msgid "Step 6: Clean Up" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:167 #, fuzzy msgid "Shut down the Flower client components:" msgstr "Client de Flower" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:174 msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" @@ -3577,16 +3799,16 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:33 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:39 +#: ../../source/docker/tutorial-quickstart-docker.rst:38 msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:45 +#: ../../source/docker/tutorial-quickstart-docker.rst:44 msgid "" "User-defined networks, such as ``flwr-network``, enable IP resolution of " "container names, a feature absent in the default bridge network. This " @@ -3594,53 +3816,56 @@ msgid "" "first." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#: ../../source/docker/tutorial-quickstart-docker.rst:49 #, fuzzy msgid "Step 2: Start the SuperLink" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 -#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:64 +#: ../../source/docker/tutorial-quickstart-docker.rst:51 #, fuzzy msgid "Open your terminal and run:" msgstr "Ouvre un autre terminal et démarre le deuxième client :" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "Understand the command" +msgid "" +"``-p 9091:9091 -p 9092:9092 -p 9093:9093``: Map port ``9091``, ``9092`` " +"and ``9093`` of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " -"container to the same port of" +"container to the same port of the host machine, allowing other services " +"to access the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "the host machine, allowing other services to access the Driver API on" +msgid "" +"ServerAppIO API on ``http://localhost:9091``, the Fleet API on " +"``http://localhost:9092`` and" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgid "the Exec API on ``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:71 -#: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:219 -#: ../../source/docker/tutorial-quickstart-docker.rst:309 +#: ../../source/docker/tutorial-quickstart-docker.rst:74 +#: ../../source/docker/tutorial-quickstart-docker.rst:114 +#: ../../source/docker/tutorial-quickstart-docker.rst:223 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:72 +#: ../../source/docker/tutorial-quickstart-docker.rst:75 msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:73 -#: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:220 -#: ../../source/docker/tutorial-quickstart-docker.rst:311 +#: ../../source/docker/tutorial-quickstart-docker.rst:76 +#: ../../source/docker/tutorial-quickstart-docker.rst:116 +#: ../../source/docker/tutorial-quickstart-docker.rst:225 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3662,16 +3887,26 @@ msgstr "" msgid "unencrypted communication." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:80 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"independent process. The SuperLink does not attempt to create it. You can" +" learn more about" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the different process modes here: :doc:`run-as-subprocess`." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:86 #, fuzzy -msgid "Step 3: Start the SuperNode" +msgid "Step 3: Start the SuperNodes" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker.rst:82 +#: ../../source/docker/tutorial-quickstart-docker.rst:88 msgid "Start two SuperNode containers." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:84 +#: ../../source/docker/tutorial-quickstart-docker.rst:90 msgid "Start the first container:" msgstr "" @@ -3687,18 +3922,18 @@ msgstr "" msgid "``http://localhost:9094``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:109 +#: ../../source/docker/tutorial-quickstart-docker.rst:115 msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " -"to be run and the specific tag" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: This is the " +"name of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "of the image." +msgid "image to be run and the specific tag of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst @@ -3723,51 +3958,54 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--supernode-address 0.0.0.0:9094``: Set the address and port number " -"that the SuperNode" +"``--clientappio-api-address 0.0.0.0:9094``: Set the address and port " +"number that the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "is listening on." +msgid "SuperNode is listening on to communicate with the ClientApp. If" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--isolation process``: Tells the SuperNode that the ClientApp is " -"created by separate" +"two SuperNodes are started on the same machine, set two different port " +"numbers for each SuperNode." 
msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "independent process. The SuperNode does not attempt to create it." +msgid "" +"(E.g. In the next step, we set the second SuperNode container to listen " +"on port 9095)" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#: ../../source/docker/tutorial-quickstart-docker.rst:132 #, fuzzy msgid "Start the second container:" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker.rst:142 -msgid "Step 4: Start the ClientApp" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#, fuzzy +msgid "Step 4: Start a ServerApp" +msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker.rst:144 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 msgid "" -"The ClientApp Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own ClientApp image. In order to " +"The ServerApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ServerApp image. In order to " "install the FAB dependencies, you will need to create a Dockerfile that " -"extends the ClientApp image and installs the required dependencies." +"extends the ServerApp image and installs the required dependencies." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:149 +#: ../../source/docker/tutorial-quickstart-docker.rst:157 msgid "" -"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " -"the following code into it:" +"Create a ServerApp Dockerfile called ``serverapp.Dockerfile`` and paste " +"the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#: ../../source/docker/tutorial-quickstart-docker.rst:160 #, fuzzy -msgid "Dockerfile.clientapp" -msgstr "Flower ClientApp." 
+msgid "serverapp.Dockerfile" +msgstr "Démarrer le serveur" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "Understand the Dockerfile" @@ -3775,13 +4013,13 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/serverapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"to be built from is the ``flwr/clientapp image``, version :substitution-" +"to be built from is the ``flwr/serverapp`` image, version :substitution-" "code:`|stable_flwr_version|`." msgstr "" @@ -3839,7 +4077,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"``ENTRYPOINT [\"flwr-serverapp\"]``: Set the command ``flwr-serverapp`` " "to be" msgstr "" @@ -3847,7 +4085,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:186 +#: ../../source/docker/tutorial-quickstart-docker.rst:194 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3856,211 +4094,205 @@ msgid "" "after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:192 +#: ../../source/docker/tutorial-quickstart-docker.rst:200 msgid "" -"Next, build the ClientApp Docker image by running the following command " -"in the directory where the Dockerfile is located:" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the ServerApp image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:201 -msgid "" -"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. 
" -"Remember that these values are merely examples, and you can customize " -"them according to your requirements." +#: ../../source/docker/tutorial-quickstart-docker.rst:224 +msgid "``--name serverapp``: Assign the name ``serverapp`` to the container." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:205 -#, fuzzy -msgid "Start the first ClientApp container:" -msgstr "Utilisation du moteur du client virtuel" - #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +"``flwr_serverapp:0.0.1``: This is the name of the image to be run and the" " specific tag" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" -" the address" +msgid "of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``supernode-1:9094``." +msgid "" +"``--serverappio-api-address superlink:9091``: Connect to the SuperLink's " +"ServerAppIO API" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:226 -msgid "Start the second ClientApp container:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "at the address ``superlink:9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#: ../../source/docker/tutorial-quickstart-docker.rst:234 #, fuzzy -msgid "Step 5: Start the SuperExec" +msgid "Step 5: Start the ClientApp" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#: ../../source/docker/tutorial-quickstart-docker.rst:236 msgid "" -"The procedure for building and running a SuperExec image is almost " -"identical to the ClientApp image." +"The procedure for building and running a ClientApp image is almost " +"identical to the ServerApp image." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" -"Similar to the ClientApp image, you will need to create a Dockerfile that" -" extends the SuperExec image and installs the required FAB dependencies." +"Similar to the ServerApp image, you will need to create a Dockerfile that" +" extends the ClientApp image and installs the required FAB dependencies." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:245 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " -"the following code in:" +"Create a ClientApp Dockerfile called ``clientapp.Dockerfile`` and paste " +"the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:248 -msgid "Dockerfile.superexec" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:245 +#, fuzzy +msgid "clientapp.Dockerfile" +msgstr "client" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"to be built from is the ``flwr/superexec image``, version :substitution-" +"to be built from is the ``flwr/clientapp`` image, version :substitution-" "code:`|stable_flwr_version|`." 
msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" -"superexec`` to be" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +msgid "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:283 +#: ../../source/docker/tutorial-quickstart-docker.rst:286 msgid "" -"Afterward, in the directory that holds the Dockerfile, execute this " -"Docker command to build the SuperExec image:" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:290 #, fuzzy -msgid "Start the SuperExec container:" -msgstr "Démarrer le serveur" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" -msgstr "" +msgid "Start the first ClientApp container:" +msgstr "Utilisation du moteur du client virtuel" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"the host machine, allowing you to access the SuperExec API on " -"``http://localhost:9093``." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst:310 -msgid "``--name superexec``: Assign the name ``superexec`` to the container." 
+"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" -" specific tag" +"``--clientappio-api-address supernode-1:9094``: Connect to the " +"SuperNode's ClientAppIO" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " -"SuperExec executor to" +msgid "API at the address ``supernode-1:9094``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "connect to the SuperLink running on port ``9091``." +#: ../../source/docker/tutorial-quickstart-docker.rst:314 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:320 +#: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#: ../../source/docker/tutorial-quickstart-docker.rst:328 #, fuzzy msgid "Add the following lines to the ``pyproject.toml``:" msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." 
-#: ../../source/docker/tutorial-quickstart-docker.rst:331 -msgid "Run the ``quickstart-docker`` project by executing the command:" -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker.rst:337 -msgid "Follow the SuperExec logs to track the execution of the run:" +msgid "" +"Run the ``quickstart-docker`` project and follow the ServerApp logs to " +"track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:345 #, fuzzy msgid "Step 7: Update the Application" msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/docker/tutorial-quickstart-docker.rst:346 +#: ../../source/docker/tutorial-quickstart-docker.rst:347 msgid "" "Change the application code. For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#: ../../source/docker/tutorial-quickstart-docker.rst:350 #, fuzzy msgid "quickstart_docker/task.py" msgstr "Démarrage rapide des Pandas" -#: ../../source/docker/tutorial-quickstart-docker.rst:356 -msgid "Stop the current ClientApp containers:" +#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#, fuzzy +msgid "Stop the current ServerApp and ClientApp containers:" +msgstr "Utilisation du moteur du client virtuel" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:361 +msgid "" +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:364 +msgid "If you haven’t made any changes, you can skip steps 2 through 4." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#: ../../source/docker/tutorial-quickstart-docker.rst:370 #, fuzzy -msgid "Rebuild the FAB and ClientApp image:" +msgid "Rebuild ServerApp and ClientApp images:" msgstr "Chargement des données" -#: ../../source/docker/tutorial-quickstart-docker.rst:368 -msgid "Launch two new ClientApp containers based on the newly built image:" +#: ../../source/docker/tutorial-quickstart-docker.rst:377 +msgid "" +"Launch one new ServerApp and two new ClientApp containers based on the " +"newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:383 +#: ../../source/docker/tutorial-quickstart-docker.rst:402 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:390 +#: ../../source/docker/tutorial-quickstart-docker.rst:409 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:392 +#: ../../source/docker/tutorial-quickstart-docker.rst:411 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 -#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker.rst:423 #, fuzzy msgid "Where to Go Next" msgstr "Par où commencer" -#: ../../source/docker/tutorial-quickstart-docker.rst:406 +#: ../../source/docker/tutorial-quickstart-docker.rst:425 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:407 +#: ../../source/docker/tutorial-quickstart-docker.rst:426 msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:427 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -4087,176 +4319,161 @@ msgstr "" msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:38 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:39 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:46 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " -"SuperExec and SuperNode images correctly." +"``ServerApp`` and ``ClientApp`` images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 #, fuzzy msgid "Step 2: Run Flower in Insecure Mode" msgstr "Serveur de Flower" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:53 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:61 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:175 msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 -msgid "" -"``-f compose.yml``: Specify the YAML file that contains the basic Flower " -"service definitions." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:181 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:182 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:77 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 msgid "" -"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" -" the SuperExec addresses in the ``pyproject.toml`` file." +"To ensure the ``flwr`` CLI connects to the SuperLink, you need to specify" +" the SuperLink addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:85 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:225 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 -msgid "Execute the command to run the quickstart example:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 -msgid "Monitor the SuperExec logs and wait for the summary to appear:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:94 +msgid "" +"Run the quickstart example, monitor the ``ServerApp`` logs and wait for " +"the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:102 #, fuzzy msgid "Step 4: Update the Application" msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 msgid "In the next step, change the application code." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:121 #, fuzzy msgid "Rebuild and restart the services." msgstr "Nous pouvons déjà démarrer le *serveur* :" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 -msgid "" -"If you have modified the dependencies listed in your ``pyproject.toml`` " -"file, it is essential to rebuild images." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:128 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:130 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:136 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 -msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the ``ServerApp`` logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:155 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:157 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:162 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 -msgid "" -"For more information, consult the following page: :doc:`persist-" -"superlink-state`." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:167 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +msgid "Run the command:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 -msgid "Run the command:" +msgid "" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst @@ -4276,17 +4493,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:238 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:197 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -4294,120 +4511,105 @@ msgid "" "if the containers are stopped and started again." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:205 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:207 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 -msgid "Step 7: Add another SuperNode" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 -msgid "" -"You can add more SuperNodes and ClientApps by duplicating their " -"definitions in the ``compose.yml`` file." -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:245 +#, fuzzy +msgid "Step 7: Add another SuperNode and ClientApp" +msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 msgid "" -"Just give each new SuperNode and ClientApp service a unique service name " -"like ``supernode-3``, ``clientapp-3``, etc." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 -msgid "In ``compose.yml``, add the following:" +"You can add more SuperNodes and ClientApps by uncommenting their " +"definitions in the ``compose.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:250 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:302 msgid "" -"If you also want to enable TLS for the new SuperNodes, duplicate the " -"SuperNode definition for each new SuperNode service in the ``with-" -"tls.yml`` file." +"If you also want to enable TLS for the new SuperNode, uncomment the " +"definition in the ``with-tls.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 -msgid "" -"Make sure that the names of the services match with the one in the " -"``compose.yml`` file." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 -msgid "In ``with-tls.yml``, add the following:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:305 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:326 +#, fuzzy +msgid "Restart the services with:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:335 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 -msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:340 +msgid "Comment out the lines 2-6 and uncomment the lines 7-13:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:342 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy msgid "Restart the services:" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:376 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:378 msgid "" "You can merge multiple Compose files into a single file. 
For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 msgid "Remove all services and volumes:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:402 #, fuzzy msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "Démarrage rapide XGBoost" @@ -4431,551 +4633,6 @@ msgid "" "tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 -#, fuzzy -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." 
-msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante avec `FedBN `_, une stratégie" -" de formation fédérée conçue pour les données non-identifiées. Nous " -"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " -"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " -"Lors de l'application de FedBN, seules quelques modifications sont " -"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " -"fédération `_." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "Centralized Training" -msgstr "Formation centralisée" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 -#, fuzzy -msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called ``cifar.py``, revised part is " -"shown below:" -msgstr "" -"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " -"Centralized To Federated `_. La seule chose à faire est de modifier " -"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" -"dessous :" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 -msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." -msgstr "" -"L'architecture du modèle définie dans la classe Net() est ajoutée avec " -"les couches de normalisation par lots en conséquence." 
- -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 -msgid "You can now run your machine learning workload:" -msgstr "" -"Tu peux maintenant exécuter ta charge de travail d'apprentissage " -"automatique :" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 -#, fuzzy -msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." -msgstr "" -"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un système d'apprentissage fédéré au sein de " -"FedBN, le système se compose d'un serveur et de deux clients." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 -msgid "Federated Training" -msgstr "Formation fédérée" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 -#, fuzzy -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only ``get_parameters`` and ``set_parameters`` function " -"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" -" PyTorch - From Centralized To Federated `. first." -msgstr "" -"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " -"`_, les parties suivantes sont faciles à suivre, seules " -"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " -":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " -"veuillez lire `Exemple : PyTorch - From Centralized To Federated " -"`. d'abord." 
- -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 -#, fuzzy -msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -"``server.py`` keeps unchanged, we can start the server directly." -msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " -":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " -"directement." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 -#, fuzzy -msgid "" -"Finally, we will revise our *client* logic by changing ``get_parameters``" -" and ``set_parameters`` in ``client.py``, we will exclude batch " -"normalization parameters from model parameter list when sending to or " -"receiving from the server." -msgstr "" -"Enfin, nous allons réviser notre logique *client* en modifiant " -":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " -"nous allons exclure les paramètres de normalisation des lots de la liste " -"des paramètres du modèle lors de l'envoi ou de la réception depuis le " -"serveur." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 -msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras ton projet PyTorch " -"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" -" FedBN sur deux clients. Félicitations !" 
- -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 -#: ../../source/tutorial-quickstart-jax.rst:319 -msgid "Next Steps" -msgstr "Prochaines étapes" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 -#, fuzzy -msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" -msgstr "" -"Le code source complet de cet exemple se trouve ici " -"`_. Notre exemple est bien sûr un peu trop " -"simplifié parce que les deux clients chargent exactement le même ensemble" -" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " -"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " -"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " -"?" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "Exemple : PyTorch - De la centralisation à la fédération" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." 
-msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " -"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " -"présentons cette tâche d'apprentissage automatique avec une approche " -"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " -"PyTorch " -"`_. " -"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." -msgstr "" -"Nous commençons par une brève description du code d'entraînement CNN " -"centralisé. Si tu veux une explication plus approfondie de ce qui se " -"passe, jette un coup d'œil au tutoriel officiel `PyTorch " -"`_." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 -#, fuzzy -msgid "" -"Let's create a new file called ``cifar.py`` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as ``torch`` and ``torchvision``) need to be " -"imported. You can see that we do not import any package for federated " -"learning. You can keep all these imports as they are even when we add the" -" federated learning components at a later point." -msgstr "" -"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " -"composants requis pour une formation traditionnelle (centralisée) sur le " -"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " -"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " -"n'importons aucun paquet pour l'apprentissage fédéré. 
Tu peux conserver " -"toutes ces importations telles quelles même lorsque nous ajouterons les " -"composants d'apprentissage fédéré à un moment ultérieur." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 -#, fuzzy -msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in ``class Net()``." -msgstr "" -"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" -" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " -"L'architecture du modèle (un réseau neuronal convolutif très simple) est " -"définie dans :code:`class Net()`." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 -#, fuzzy -msgid "" -"The ``load_data()`` function loads the CIFAR-10 training and test sets. " -"The ``transform`` normalized the data after loading." -msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test CIFAR-10. La fonction :code:`transform` normalise les données après" -" leur chargement." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 -#, fuzzy -msgid "" -"We now need to define the training (function ``train()``) which loops " -"over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "" -"Nous devons maintenant définir la formation (fonction :code:`train()`) " -"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " -"rétropropage, puis effectue une étape d'optimisation pour chaque lot " -"d'exemples de formation." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 -#, fuzzy -msgid "" -"The evaluation of the model is defined in the function ``test()``. The " -"function loops over all test samples and measures the loss of the model " -"based on the test dataset." 
-msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`test()`. La " -"fonction boucle sur tous les échantillons de test et mesure la perte du " -"modèle en fonction de l'ensemble des données de test." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." -msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" -" notre CNN sur CIFAR-10." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." -msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 -msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." 
-msgstr "" -"Le projet simple d'apprentissage automatique discuté dans la section " -"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," -" nous appelons cela l'apprentissage centralisé. Ce concept " -"d'apprentissage centralisé, comme le montre la section précédente, est " -"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " -"l'ont déjà utilisé. Normalement, si tu veux exécuter des charges de " -"travail d'apprentissage automatique de manière fédérée, tu dois alors " -"changer la plupart de ton code et tout mettre en place à partir de zéro, " -"ce qui peut représenter un effort considérable." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "" -"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " -"une configuration d'apprentissage fédéré sans avoir besoin d'une " -"réécriture majeure." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 -#, fuzzy -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in ``cifar.py`` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." -msgstr "" -"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " -"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " -"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " -"clients. Les *clients* exécutent la formation et mettent à jour les " -"paramètres. 
Les paramètres mis à jour sont renvoyés au *serveur* qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " -"un tour du processus d'apprentissage fédéré et nous répétons cette " -"opération pour plusieurs tours." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 -#: ../../source/tutorial-quickstart-jax.rst:147 -#, fuzzy -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -"``server.py`` first. The *server* needs to import the Flower package " -"``flwr``. Next, we use the ``start_server`` function to start a server " -"and tell it to perform three rounds of federated learning." -msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " -"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" -" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " -"pour démarrer un serveur et lui demander d'effectuer trois cycles " -"d'apprentissage fédéré." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 -#: ../../source/tutorial-quickstart-jax.rst:161 -msgid "We can already start the *server*:" -msgstr "Nous pouvons déjà démarrer le *serveur* :" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#, fuzzy -msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined centralized training in ``cifar.py``. Our " -"*client* needs to import ``flwr``, but also ``torch`` to update the " -"parameters on our PyTorch model:" -msgstr "" -"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " -"et nous appuyer sur la formation centralisée définie précédemment dans " -":code:`cifar.py`. 
Notre *client* doit importer :code:`flwr`, mais aussi " -":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 -#, fuzzy -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " -"than ``Client`` if you use a framework with good NumPy interoperability " -"(like PyTorch or TensorFlow/Keras) because it avoids some of the " -"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " -"implement four methods, two methods for getting/setting model parameters," -" one method for training the model, and one method for testing the model:" -msgstr "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. 
" -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#, fuzzy -msgid "``set_parameters``" -msgstr ":code:`set_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 -#: ../../source/tutorial-quickstart-jax.rst:192 -msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "règle les paramètres du modèle local reçus du serveur" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#: ../../source/tutorial-quickstart-jax.rst:194 -#, fuzzy -msgid "" -"loop over the list of model parameters received as NumPy ``ndarray``'s " -"(think list of neural network layers)" -msgstr "" -"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " -":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#: ../../source/tutorial-quickstart-scikitlearn.rst:129 -#, fuzzy -msgid "``get_parameters``" -msgstr ":code:`get_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#, fuzzy -msgid "" -"get the model parameters and return them as a list of NumPy ``ndarray``'s" -" (which is what ``flwr.client.NumPyClient`` expects)" -msgstr "" -"récupère les paramètres du modèle et les renvoie sous forme de liste de " -":code:`ndarray` NumPy (ce qui correspond à ce que " -":code:`flwr.client.NumPyClient` attend)" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "``fit``" -msgstr "" - -#: 
../../source/example-pytorch-from-centralized-to-federated.rst:255 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 -#: ../../source/tutorial-quickstart-jax.rst:200 -#: ../../source/tutorial-quickstart-jax.rst:205 -msgid "" -"update the parameters of the local model with the parameters received " -"from the server" -msgstr "" -"mettre à jour les paramètres du modèle local avec les paramètres reçus du" -" serveur" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -msgid "train the model on the local training set" -msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 -msgid "get the updated local model weights and return them to the server" -msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -#: ../../source/tutorial-quickstart-jax.rst:208 -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -#, fuzzy -msgid "``evaluate``" -msgstr ":code:`évaluer`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 -#: ../../source/tutorial-quickstart-jax.rst:207 -msgid "evaluate the updated model on the local test set" -msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -msgid "return the local loss and accuracy to the server" -msgstr "renvoie la perte locale et la précision au serveur" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 -#, fuzzy -msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " -"So what we really do here is we tell Flower through our ``NumPyClient`` " -"subclass which of our already defined functions to call for training and " -"evaluation. 
We included type annotations to give you a better " -"understanding of the data types that get passed around." -msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`test()` définies " -"précédemment dans :code:`cifar.py`. Ce que nous faisons vraiment ici, " -"c'est que nous indiquons à Flower, par le biais de notre sous-classe " -":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 -#, fuzzy -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a ``CifarClient``, and starts this client. You load your " -"data and model by using ``cifar.py``. Start ``CifarClient`` with the " -"function ``fl.client.start_client()`` by pointing it at the same IP " -"address we used in ``server.py``:" -msgstr "" -"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " -"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" -" données et ton modèle en utilisant :code:`cifar.py`. Démarre " -":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " -"la faisant pointer sur la même adresse IP que celle que nous avons " -"utilisée dans :code:`server.py` :" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 -#: ../../source/tutorial-quickstart-jax.rst:309 -msgid "And that's it. 
You can now open two additional terminal windows and run" -msgstr "" -"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " -"les commandes suivantes" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "" -"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " -"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " -"l'apprentissage fédéré sur deux clients. Félicitations !" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 -#, fuzzy -msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" -msgstr "" -"Le code source complet de cet exemple : `PyTorch : From Centralized To " -"Federated (Code) `_. Notre exemple est, bien sûr, " -"un peu trop simplifié parce que les deux clients chargent exactement le " -"même ensemble de données, ce qui n'est pas réaliste. Tu es maintenant " -"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " -"sous-ensembles de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter " -"d'autres clients ?" 
- #: ../../source/explanation-differential-privacy.rst:2 #: ../../source/explanation-differential-privacy.rst:14 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 @@ -5199,7 +4856,7 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:141 -#: ../../source/how-to-use-differential-privacy.rst:113 +#: ../../source/how-to-use-differential-privacy.rst:114 #, fuzzy msgid "Local Differential Privacy" msgstr "Confidentialité différentielle" @@ -5277,7 +4934,6 @@ msgstr "" "17455-17466." #: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated evaluation" msgstr "Évaluation fédérée" @@ -5312,11 +4968,11 @@ msgstr "" "prendre les paramètres du modèle global actuel comme entrée et renvoyer " "les résultats de l'évaluation :" -#: ../../source/explanation-federated-evaluation.rst:61 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "Custom Strategies" msgstr "Stratégies personnalisées" -#: ../../source/explanation-federated-evaluation.rst:63 +#: ../../source/explanation-federated-evaluation.rst:72 #, fuzzy msgid "" "The ``Strategy`` abstraction provides a method called ``evaluate`` that " @@ -5330,15 +4986,16 @@ msgstr "" "appelle :code:`evaluate` après l'agrégation des paramètres et avant " "l'évaluation fédérée (voir le paragraphe suivant)." 
-#: ../../source/explanation-federated-evaluation.rst:69 +#: ../../source/explanation-federated-evaluation.rst:78 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated Evaluation" msgstr "Évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:72 +#: ../../source/explanation-federated-evaluation.rst:81 msgid "Implementing Federated Evaluation" msgstr "Mise en œuvre de l'évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:74 +#: ../../source/explanation-federated-evaluation.rst:83 #, fuzzy msgid "" "Client-side evaluation happens in the ``Client.evaluate`` method and can " @@ -5347,11 +5004,11 @@ msgstr "" "L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " "et peut être configurée côté serveur." -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/explanation-federated-evaluation.rst:116 msgid "Configuring Federated Evaluation" msgstr "Configuration de l'évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:110 +#: ../../source/explanation-federated-evaluation.rst:118 msgid "" "Federated evaluation can be configured from the server side. Built-in " "strategies support the following arguments:" @@ -5359,7 +5016,7 @@ msgstr "" "L'évaluation fédérée peut être configurée du côté du serveur. Les " "stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/explanation-federated-evaluation.rst:113 +#: ../../source/explanation-federated-evaluation.rst:121 #, fuzzy msgid "" "``fraction_evaluate``: a ``float`` defining the fraction of clients that " @@ -5375,7 +5032,7 @@ msgstr "" "aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " "à :code:`0.0`, l'évaluation fédérée sera désactivée." 
-#: ../../source/explanation-federated-evaluation.rst:118 +#: ../../source/explanation-federated-evaluation.rst:126 #, fuzzy msgid "" "``min_evaluate_clients``: an ``int``: the minimum number of clients to be" @@ -5388,7 +5045,7 @@ msgstr "" " sont connectés au serveur, alors :code:`20` clients seront sélectionnés " "pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:122 +#: ../../source/explanation-federated-evaluation.rst:130 #, fuzzy msgid "" "``min_available_clients``: an ``int`` that defines the minimum number of " @@ -5404,7 +5061,7 @@ msgstr "" "attendra que d'autres clients soient connectés avant de continuer à " "échantillonner des clients pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:127 +#: ../../source/explanation-federated-evaluation.rst:135 #, fuzzy msgid "" "``on_evaluate_config_fn``: a function that returns a configuration " @@ -5419,11 +5076,11 @@ msgstr "" "l'évaluation côté client depuis le côté serveur, par exemple pour " "configurer le nombre d'étapes de validation effectuées." -#: ../../source/explanation-federated-evaluation.rst:157 +#: ../../source/explanation-federated-evaluation.rst:177 msgid "Evaluating Local Model Updates During Training" msgstr "Évaluer les mises à jour du modèle local pendant la formation" -#: ../../source/explanation-federated-evaluation.rst:159 +#: ../../source/explanation-federated-evaluation.rst:179 #, fuzzy msgid "" "Model parameters can also be evaluated during training. ``Client.fit`` " @@ -5433,17 +5090,18 @@ msgstr "" "formation. 
:code:`Client.fit` peut renvoyer des résultats d'évaluation " "arbitraires sous forme de dictionnaire :" -#: ../../source/explanation-federated-evaluation.rst:201 +#: ../../source/explanation-federated-evaluation.rst:220 msgid "Full Code Example" msgstr "Exemple de code complet" -#: ../../source/explanation-federated-evaluation.rst:203 +#: ../../source/explanation-federated-evaluation.rst:222 #, fuzzy msgid "" "For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"evaluation, see the `Advanced TensorFlow Example " +"`_" +" (the same approach can be applied to workloads implemented in any other " +"framework)." msgstr "" "Pour un exemple de code complet qui utilise à la fois l'évaluation " "centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " @@ -5656,625 +5314,18 @@ msgid "" "a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/explanation-flower-architecture.rst:121 -msgid "" -"To help you start and manage all of the concurrently executing training " -"runs, Flower offers one additional long-running server-side service " -"called **SuperExec**. When you type ``flwr run`` to start a new training " -"run, the ``flwr`` CLI bundles your local project (mainly your " -"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " -"**SuperExec** will then take care of starting and managing your " -"``ServerApp``, which in turn selects SuperNodes to execute your " -"``ClientApp``." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:128 -msgid "" -"This architecture allows many users to (concurrently) run their projects " -"on the same federation, simply by typing ``flwr run`` on their local " -"developer machine." 
-msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "Flower Deployment Engine with SuperExec" -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "The SuperExec service for managing concurrent training runs in Flower." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:141 +#: ../../source/explanation-flower-architecture.rst:123 msgid "" "This explanation covers the Flower Deployment Engine. An explanation " "covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/explanation-flower-architecture.rst:146 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" "As we continue to enhance Flower at a rapid pace, we'll periodically " "update this explainer document. Feel free to share any feedback with us." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "Modèle FED" - -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "Table des matières" - -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[Table des matières](#table-of-contents)" - -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[Résumé](#résumé)" - -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[Motivation](#motivation)" - -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[Buts](#buts)" - -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid 
"[Non-Goals](#non-goals)" -msgstr "[Non-objectifs](#non-objectifs)" - -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[Proposition](#proposition)" - -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[Inconvénients](#inconvénients)" - -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[Alternatives envisagées](#alternatives-considered)" - -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[Annexe](#appendix)" - -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "Résumé" - -#: ../../source/fed/0000-20200102-fed-template.md:26 -#, fuzzy -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "[TODO - phrase 1 : résumé du problème]" - -#: ../../source/fed/0000-20200102-fed-template.md:28 -#, fuzzy -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "[TODO - phrase 2 : résumé de la solution]" - -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "Motivation" - -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -#, fuzzy -msgid 
"\\[TODO\\]" -msgstr "[TODO]" - -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "Objectifs" - -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "Non-objectifs" - -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "Proposition" - -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "Inconvénients" - -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "Alternatives envisagées" - -#: ../../source/fed/0000-20200102-fed-template.md:52 -#, fuzzy -msgid "\\[Alternative 1\\]" -msgstr "[Alternative 1]" - -#: ../../source/fed/0000-20200102-fed-template.md:56 -#, fuzzy -msgid "\\[Alternative 2\\]" -msgstr "[Alternative 2]" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Doc sur l'amélioration des fleurs" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Métadonnées](#métadonnées)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" 
-msgstr "[Workflow](#workflow)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "" -"Une amélioration de la fleur est un processus de développement " -"standardisé pour" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "" -"fournir une structure commune pour proposer des changements plus " -"importants" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "s'assurer que la motivation du changement est claire" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "" -"conserver les informations sur le projet dans un système de contrôle des " -"versions" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "" -"documenter la motivation des changements qui ont un impact sur " -"l'utilisateur" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 -msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "" -"s'assurer que les participants de la communauté peuvent mener à bien les " -"changements dans le cadre 
d'une ou plusieurs versions et que les parties " -"prenantes sont représentées de manière adéquate tout au long du processus" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "Par conséquent, un document d'amélioration combine des aspects de" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "une caractéristique, et un document de suivi des efforts" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "un document sur les exigences du produit" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "un document de conception" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 -msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "" -"en un seul fichier, qui est créé progressivement en collaboration avec la" -" communauté." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 -msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." -msgstr "" -"Pour les changements lointains ou les fonctionnalités proposées à Flower," -" une abstraction au-delà d'une simple question GitHub ou d'une demande de" -" tirage est nécessaire pour comprendre et communiquer les changements à " -"venir dans le projet." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 -msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." 
-msgstr "" -"L'objectif de ce processus est de réduire la quantité de \"connaissances " -"tribales\" dans notre communauté. En déplaçant les décisions des fils de " -"discussion Slack, des appels vidéo et des conversations de couloir vers " -"un artefact bien suivi, ce processus vise à améliorer la communication et" -" la découvrabilité." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "" -"Si une amélioration doit être décrite par écrit ou verbalement à " -"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " -"créer un document d'amélioration." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 -msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." -msgstr "" -"De même, tout effort technique (refactorisation, changement architectural" -" majeur) qui aura un impact sur une grande partie de la communauté de " -"développement doit également être communiqué à grande échelle. Le " -"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" -" sur l'utilisateur ou l'opérateur type." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 -msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." 
-msgstr "" -"Pour les petits changements et ajouts, passer par le processus " -"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " -"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " -"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " -"le fonctionnement ou l'utilisation de Flower." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 -msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." -msgstr "" -"Les améliorations sont différentes des demandes de fonctionnalités, car " -"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " -"défendues par les membres de la communauté." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 -msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." -msgstr "" -"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" -" défini et un flux de travail pour examiner et stocker les documents " -"d'amélioration pour référence - le Doc d'amélioration." 
- -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Modèle de document d'amélioration" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 -msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "" -"Chaque document d'amélioration est fourni sous la forme d'un fichier " -"Markdown ayant la structure suivante" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "" -"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " -"préambule YAML)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Titre (le même que dans les métadonnées)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table des matières (si nécessaire)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Contraintes/Cavats (facultatif)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Détails de la conception (facultatif)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "Critères d'obtention du diplôme" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "À titre de référence, ce document suit la structure ci-dessus." 
- -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Métadonnées" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "" -"**numérofed** (Obligatoire) Le `numérofed` du dernier document " -"d'amélioration de la fleur + 1. Avec ce numéro, il devient facile de " -"faire référence à d'autres propositions." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 -msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "" -"**status** (obligatoire) L'état actuel de la proposition. Voir " -"[workflow](#workflow) pour les états possibles." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 -msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." -msgstr "" -"**authors** (Obligatoire) Une liste des auteurs de la proposition, il " -"s'agit simplement de l'identifiant GitHub." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." -msgstr "" -"**creation-date** (Obligatoire) Date à laquelle la proposition a été " -"soumise pour la première fois dans un RP." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." 
-msgstr "" -"**dernière mise à jour** (Facultatif) La date à laquelle la proposition a" -" été modifiée de manière significative pour la dernière fois." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." -msgstr "" -"**see-also** (Facultatif) Une liste d'autres propositions qui sont " -"pertinentes par rapport à celle-ci." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "" -"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " -"remplace." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "Flux de travail" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." -msgstr "" -"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " -"discussion ou d'une présentation au sein de la communauté. À ce titre, " -"elle a besoin d'un champion, généralement l'auteur, qui se charge de " -"l'amélioration. Cette personne doit également trouver des committers to " -"Flower prêts à examiner la proposition." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. 
All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." -msgstr "" -"Les nouvelles améliorations sont enregistrées avec un nom de fichier de " -"la forme `NNN-YYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du " -"document d'amélioration de la fleur, dans `enhancements`. Toutes les " -"améliorations commencent à l'état `provisionnel` dans le cadre d'une " -"demande d'extraction. Les discussions sont effectuées dans le cadre de " -"l'examen de la demande d'extraction." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." -msgstr "" -"Une fois qu'une amélioration a été examinée et approuvée, son statut " -"passe à `implémentable`. L'implémentation réelle est alors réalisée dans " -"des demandes d'extension séparées. Ces demandes d'extension doivent " -"mentionner l'amélioration concernée dans leur description. Une fois " -"l'implémentation réalisée, le statut de la proposition passe à " -"`implémented`." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" -msgstr "" -"Sous certaines conditions, d'autres états sont possibles. Une " -"amélioration a les états suivants :" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 -msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." 
-msgstr "" -"`provisoire` : L'amélioration a été proposée et est en cours de " -"définition. C'est l'état de départ pendant que la proposition est étoffée" -" et activement définie et discutée." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`implementable` : L'amélioration a été examinée et approuvée." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "" -"`implemented` : L'amélioration a été mise en œuvre et n'est plus " -"activement modifiée." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "" -"`deferred` : L'amélioration est proposée mais n'est pas activement " -"travaillée." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." -msgstr "" -"`rejeté` : Les auteurs et les réviseurs ont décidé que cette amélioration" -" n'allait pas de l'avant." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." 
-msgstr "" -"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " -"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" -" obstacle pour les éventuels nouveaux contributeurs." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "" -"Élargir le modèle de proposition au-delà de la description d'une seule " -"phrase actuellement requise dans le modèle de questions sur les " -"caractéristiques peut constituer une lourde charge pour les personnes " -"dont l'anglais n'est pas la langue maternelle." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "Questions sur GitHub" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." -msgstr "" -"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" -" pourrait utiliser, par exemple, des balises pour les différencier et les" -" filtrer par rapport aux autres problèmes. Le principal problème concerne" -" la discussion et la révision d'une amélioration : les GitHub Issues " -"n'ont qu'un seul fil de discussion pour les commentaires. Les " -"améliorations ont généralement plusieurs fils de discussion en même temps" -" pour différentes parties de la documentation. 
La gestion de ces " -"multiples discussions peut être déroutante lorsque l'on utilise GitHub " -"Issues." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google Docs" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." -msgstr "" -"Les Google Docs permettent de multiplier les fils de discussion. Mais " -"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " -"à ce que la communauté puisse les découvrir. Une liste de liens vers " -"toutes les propositions doit être gérée et mise à la disposition de la " -"communauté. Par rapport à l'envoi de propositions dans le cadre du " -"référentiel de Flower, le risque de liens manquants est beaucoup plus " -"élevé." - -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Doc pour l'amélioration des fleurs" - #: ../../source/how-to-aggregate-evaluation-results.rst:2 #, fuzzy msgid "Aggregate evaluation results" @@ -6302,7 +5353,7 @@ msgstr "" "clients individuels. 
Les clients peuvent renvoyer des mesures " "personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-aggregate-evaluation-results.rst:39 +#: ../../source/how-to-aggregate-evaluation-results.rst:38 msgid "" "The server can then use a customized strategy to aggregate the metrics " "provided in these dictionaries:" @@ -6455,7 +5506,7 @@ msgid "" msgstr "" #: ../../source/how-to-authenticate-supernodes.rst:100 -#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-enable-tls-connections.rst:108 #: ../../source/how-to-use-built-in-mods.rst:95 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" @@ -6471,21 +5522,16 @@ msgstr "" #: ../../source/how-to-configure-clients.rst:2 #, fuzzy -msgid "Configure clients" +msgid "Configure Clients" msgstr "Configurer les clients" #: ../../source/how-to-configure-clients.rst:4 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"Flower provides the ability to send configuration values to clients, " +"allowing server-side control over client behavior. This feature enables " +"flexible and dynamic adjustment of client-side hyperparameters, improving" +" collaboration and experimentation." msgstr "" -"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " -"configuration aux clients. Les valeurs de configuration peuvent être " -"utilisées à diverses fins. Elles constituent, par exemple, un moyen " -"populaire de contrôler les hyperparamètres côté client à partir du " -"serveur." 
#: ../../source/how-to-configure-clients.rst:9 msgid "Configuration values" @@ -6493,34 +5539,36 @@ msgstr "Valeurs de configuration" #: ../../source/how-to-configure-clients.rst:11 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These values must be of type ``Scalar``, which includes " +"``bool``, ``bytes``, ``float``, ``int``, or ``str`` (or equivalent types " +"in different languages). Scalar is the value type directly supported by " +"Flower for these configurations." msgstr "" -"Les valeurs de configuration sont représentées sous forme de dictionnaire" -" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " -"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " -"dans d'autres langages). Voici un exemple de dictionnaire de " -"configuration en Python :" -#: ../../source/how-to-configure-clients.rst:25 +#: ../../source/how-to-configure-clients.rst:17 +msgid "For example, a ``FitConfig`` dictionary might look like this:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:28 +#, fuzzy msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " +"Flower serializes these configuration dictionaries (or *config dicts* for" +" short) to their ProtoBuf representation, transports them to the client " "using gRPC, and then deserializes them back to Python dictionaries." 
msgstr "" "Flower sérialise ces dictionnaires de configuration (ou *config dict* en " "abrégé) dans leur représentation ProtoBuf, les transporte vers le client " "à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." -#: ../../source/how-to-configure-clients.rst:31 +#: ../../source/how-to-configure-clients.rst:34 +#, fuzzy msgid "" "Currently, there is no support for directly sending collection types " "(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"dictionaries. To send collections, convert them to a supported type " +"(e.g., JSON string) and decode on the client side." msgstr "" "Actuellement, il n'est pas possible d'envoyer directement des types de " "collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" @@ -6529,233 +5577,388 @@ msgstr "" "l'un des types de valeurs pris en charge (et en les reconvertissant du " "côté client)." -#: ../../source/how-to-configure-clients.rst:36 +#: ../../source/how-to-configure-clients.rst:38 +#, fuzzy +msgid "Example:" +msgstr "Exemples de PyTorch" + +#: ../../source/how-to-configure-clients.rst:51 +#, fuzzy +msgid "Configuration through Built-in Strategies" +msgstr "Configuration par le biais de stratégies intégrées" + +#: ../../source/how-to-configure-clients.rst:53 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"Flower provides configuration options to control client behavior " +"dynamically through ``FitConfig`` and ``EvaluateConfig``. 
These " +"configurations allow server-side control over client-side parameters such" +" as batch size, number of local epochs, learning rate, and evaluation " +"settings, improving collaboration and experimentation." msgstr "" -"On peut, par exemple, convertir une liste de nombres à virgule flottante " -"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " -"de configuration, et enfin reconvertir la chaîne JSON en une liste de " -"nombres à virgule flottante sur le client." -#: ../../source/how-to-configure-clients.rst:41 -msgid "Configuration through built-in strategies" -msgstr "Configuration par le biais de stratégies intégrées" +#: ../../source/how-to-configure-clients.rst:59 +#, fuzzy +msgid "``FitConfig`` and ``EvaluateConfig``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-configure-clients.rst:61 +msgid "" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These dictionaries enable the server to adjust client-" +"side hyperparameters and monitor progress effectively." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:67 +#, fuzzy +msgid "``FitConfig``" +msgstr "Configurer les clients" + +#: ../../source/how-to-configure-clients.rst:69 +msgid "" +"``FitConfig`` specifies the hyperparameters for training rounds, such as " +"the batch size, number of local epochs, and other parameters that " +"influence training." 
+msgstr "" + +#: ../../source/how-to-configure-clients.rst:72 +msgid "For example, a ``fit_config`` callback might look like this:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:90 +msgid "" +"You can then pass this ``fit_config`` callback to a built-in strategy " +"such as ``FedAvg``:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:43 +#: ../../source/how-to-configure-clients.rst:101 +msgid "" +"On the client side, the configuration is received in the ``fit`` method, " +"where it can be read and used:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:124 #, fuzzy +msgid "``EvaluateConfig``" +msgstr ":code:`évaluer`" + +#: ../../source/how-to-configure-clients.rst:126 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like ``FedAvg``. Built-in strategies support so-called " -"configuration functions. A configuration function is a function that the " -"built-in strategy calls to get the configuration dictionary for the " -"current round. It then forwards the configuration dictionary to all the " -"clients selected during that round." +"``EvaluateConfig`` specifies hyperparameters for the evaluation process, " +"such as the batch size, evaluation frequency, or metrics to compute " +"during evaluation." msgstr "" -"La façon la plus simple d'envoyer des valeurs de configuration aux " -"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " -"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" -" de configuration. Une fonction de configuration est une fonction que la " -"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " -"pour le tour en cours. Elle transmet ensuite le dictionnaire de " -"configuration à tous les clients sélectionnés au cours de ce tour." 
-#: ../../source/how-to-configure-clients.rst:49 +#: ../../source/how-to-configure-clients.rst:129 +msgid "For example, an ``evaluate_config`` callback might look like this:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:143 +msgid "" +"You can pass this ``evaluate_config`` callback to a built-in strategy " +"like ``FedAvg``:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:151 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"On the client side, the configuration is received in the ``evaluate`` " +"method, where it can be used during the evaluation process:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:175 +msgid "Example: Sending Training Configurations" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:177 +#, fuzzy +msgid "" +"Imagine we want to send (a) the batch size, (b) the current global round," +" and (c) the number of local epochs. Our configuration function could " +"look like this:" msgstr "" "Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" " la taille du lot que le client doit utiliser, (b) le cycle global actuel" " de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " "client. 
Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-configure-clients.rst:65 +#: ../../source/how-to-configure-clients.rst:190 +msgid "" +"To use this function with a built-in strategy like ``FedAvg``, pass it to" +" the ``FedAvg`` constructor (typically in your ``server_fn``):" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:211 #, fuzzy +msgid "Client-Side Configuration" +msgstr "Logique côté client" + +#: ../../source/how-to-configure-clients.rst:213 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -"``on_fit_config_fn``:" +"On the client side, configurations are received as input to the ``fit`` " +"and ``evaluate`` methods. For example:" msgstr "" -"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " -"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" -" :code:`on_fit_config_fn` :" -#: ../../source/how-to-configure-clients.rst:75 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" +#: ../../source/how-to-configure-clients.rst:230 +msgid "Dynamic Configurations per Round" +msgstr "" -#: ../../source/how-to-configure-clients.rst:86 +#: ../../source/how-to-configure-clients.rst:232 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"Configuration functions are called at the beginning of every round. This " +"allows for dynamic adjustments based on progress. 
For example, you can " +"increase the number of local epochs in later rounds:" msgstr "" -"Il existe également une fonction `on_evaluate_config_fn` pour configurer " -"l'évaluation, qui fonctionne de la même manière. Ce sont des fonctions " -"séparées car on peut vouloir envoyer différentes valeurs de configuration" -" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." -#: ../../source/how-to-configure-clients.rst:90 +#: ../../source/how-to-configure-clients.rst:247 +msgid "Customizing Client Configurations" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:249 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"In some cases, it may be necessary to send different configurations to " +"individual clients. To achieve this, you can create a custom strategy by " +"extending a built-in one, such as ``FedAvg``:" msgstr "" -"Les stratégies intégrées appellent cette fonction à chaque tour " -"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " -"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" -" à chaque tour nous permet de varier/changer le dict de config au cours " -"de tours consécutifs. Si nous voulions mettre en place un calendrier " -"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " -"locales au cours des derniers tours, nous pourrions faire ce qui suit :" -#: ../../source/how-to-configure-clients.rst:107 -#, fuzzy -msgid "The ``FedAvg`` strategy will call this function *every round*." -msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." 
+#: ../../source/how-to-configure-clients.rst:254 +msgid "Example: Client-Specific Configuration" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:273 +msgid "Next, use this custom strategy as usual:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:287 +msgid "Summary of Enhancements" +msgstr "" -#: ../../source/how-to-configure-clients.rst:110 -msgid "Configuring individual clients" -msgstr "Configuration des clients individuels" +#: ../../source/how-to-configure-clients.rst:289 +msgid "**Dynamic Configurations**: Enables per-round adjustments via functions." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:290 +msgid "**Advanced Customization**: Supports client-specific strategies." +msgstr "" -#: ../../source/how-to-configure-clients.rst:112 +#: ../../source/how-to-configure-clients.rst:291 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"**Client-Side Integration**: Configurations accessible in ``fit`` and " +"``evaluate``." msgstr "" -"Dans certains cas, il est nécessaire d'envoyer des valeurs de " -"configuration différentes à des clients différents." -#: ../../source/how-to-configure-clients.rst:115 +#: ../../source/how-to-design-stateful-clients.rst:2 #, fuzzy +msgid "Design stateful ClientApps" +msgstr "client" + +#: ../../source/how-to-design-stateful-clients.rst:20 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " -"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " -"the config dict of a *single client* (only the first client in the list, " -"the other clients in this round to not receive this \"special\" config " -"value):" +"By design, ClientApp_ objects are stateless. This means that the " +"``ClientApp`` object is recreated each time a new ``Message`` is to be " +"processed. 
This behaviour is identical with Flower's Simulation Engine " +"and Deployment Engine. For the former, it allows us to simulate the " +"running of a large number of nodes on a single machine or across multiple" +" machines. For the latter, it enables each ``SuperNode`` to be part of " +"multiple runs, each running a different ``ClientApp``." msgstr "" -"Ceci peut être réalisé en personnalisant une stratégie existante ou en " -"`mettant en œuvre une stratégie personnalisée à partir de zéro " -"`_. " -"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " -"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" -" au config dict d'un *seul client* (uniquement le premier client de la " -"liste, les autres clients de cette série ne recevant pas cette valeur de " -"configuration \"spéciale\") :" -#: ../../source/how-to-configure-logging.rst:2 -#, fuzzy -msgid "Configure logging" -msgstr "Configurer les clients" +#: ../../source/how-to-design-stateful-clients.rst:27 +msgid "" +"When a ``ClientApp`` is executed it receives a Context_. This context is " +"unique for each ``ClientApp``, meaning that subsequent executions of the " +"same ``ClientApp`` from the same node will receive the same ``Context`` " +"object. In the ``Context``, the ``.state`` attribute can be used to store" +" information that you would like the ``ClientApp`` to have access to for " +"the duration of the run. This could be anything from intermediate results" +" such as the history of training losses (e.g. as a list of `float` values" +" with a new entry appended each time the ``ClientApp`` is executed), " +"certain parts of the model that should persist at the client side, or " +"some other arbitrary Python objects. These items would need to be " +"serialized before saving them into the context." 
+msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:38 +msgid "Saving metrics to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:40 +msgid "" +"This section will demonstrate how to save metrics such as accuracy/loss " +"values to the Context_ so they can be used in subsequent executions of " +"the ``ClientApp``. If your ``ClientApp`` makes use of NumPyClient_ then " +"entire object is also re-created for each call to methods like ``fit()`` " +"or ``evaluate()``." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:45 +msgid "" +"Let's begin with a simple setting in which ``ClientApp`` is defined as " +"follows. The ``evaluate()`` method only generates a random number and " +"prints it." +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/how-to-design-stateful-clients.rst:50 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +"You can create a PyTorch project with ready-to-use ``ClientApp`` and " +"other components by running ``flwr new``." msgstr "" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/how-to-design-stateful-clients.rst:81 msgid "" -"containing relevant information including: log message level (e.g. " -"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " -"from, as well as the log message itself. In this way, the logger would " -"typically display information on your terminal as follows:" +"Let's say we want to save that randomly generated integer and append it " +"to a list that persists in the context. 
To do that, you'll need to do two" +" key things:" msgstr "" -#: ../../source/how-to-configure-logging.rst:35 -msgid "Saving log to file" +#: ../../source/how-to-design-stateful-clients.rst:84 +msgid "Make the ``context.state`` reachable withing your client class" msgstr "" -#: ../../source/how-to-configure-logging.rst:37 +#: ../../source/how-to-design-stateful-clients.rst:85 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do ``fl.server.start_server``) and when using " -"the ``VirtualClientEngine`` (i.e. when you do " -"``fl.simulation.start_simulation``). In some situations you might want to" -" save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"Initialise the appropiate record type (in this example we use " +"ConfigsRecord_) and save/read your entry when required." msgstr "" -#: ../../source/how-to-configure-logging.rst:59 +#: ../../source/how-to-design-stateful-clients.rst:123 +msgid "" +"If you run the app, you'll see an output similar to the one below. See " +"how after each round the `n_val` entry in the context gets one additional" +" integer ? Note that the order in which the `ClientApp` logs these " +"messages might differ slightly between rounds." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:146 +msgid "Saving model parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:148 +msgid "" +"Using ConfigsRecord_ or MetricsRecord_ to save \"simple\" components is " +"fine (e.g., float, integer, boolean, string, bytes, and lists of these " +"types. Note that MetricsRecord_ only supports float, integer, and lists " +"of these types) Flower has a specific type of record, a " +"ParametersRecord_, for storing model parameters or more generally data " +"arrays." 
+msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:153 +msgid "" +"Let's see a couple of examples of how to save NumPy arrays first and then" +" how to save parameters of PyTorch and TensorFlow models." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:158 +msgid "" +"The examples below omit the definition of a ``ClientApp`` to keep the " +"code blocks concise. To make use of ``ParametersRecord`` objects in your " +"``ClientApp`` you can follow the same principles as outlined earlier." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:163 #, fuzzy +msgid "Saving NumPy arrays to the context" +msgstr "Sérialise le tableau numérique NumPy en octets." + +#: ../../source/how-to-design-stateful-clients.rst:165 msgid "" -"With the above, Flower will record the log you see on your terminal to " -"``log.txt``. This file will be created in the same directory as were you " -"are running the code from. If we inspect we see the log above is also " -"recorded but prefixing with ``identifier`` each line:" +"Elements stored in a `ParametersRecord` are of type Array_, which is a " +"data structure that holds ``bytes`` and metadata that can be used for " +"deserialization. Let's see how to create an ``Array`` from a NumPy array " +"and insert it into a ``ParametersRecord``. Here we will make use of the " +"built-in serialization and deserialization mechanisms in Flower, namely " +"the ``flwr.common.array_from_numpy`` function and the `numpy()` method of" +" an Array_ object." msgstr "" -"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " -"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " -"depuis lequel le code est exécuté. 
Si nous inspectons nous voyons que le " -"log ci-dessous est également enregistré mais préfixé avec " -":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-configure-logging.rst:81 -msgid "Log your own messages" -msgstr "Loggez vos propres messages" +#: ../../source/how-to-design-stateful-clients.rst:174 +msgid "" +"Array_ objects carry bytes as their main payload and additional metadata " +"to use for deserialization. You can implement your own " +"serialization/deserialization if the provided ``array_from_numpy`` " +"doesn't fit your usecase." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:178 +msgid "" +"Let's see how to use those functions to store a NumPy array into the " +"context." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:206 +msgid "" +"To extract the data in a ``ParametersRecord``, you just need to " +"deserialize the array if interest. For example, following the example " +"above:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:223 +msgid "Saving PyTorch parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:225 +msgid "" +"Following the NumPy example above, to save parameters of a PyTorch model " +"a straightforward way of doing so is to transform the parameters into " +"their NumPy representation and then proceed as shown earlier. Below is a " +"simple self-contained example for how to do this." +msgstr "" -#: ../../source/how-to-configure-logging.rst:83 +#: ../../source/how-to-design-stateful-clients.rst:263 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." +"Let say now you want to apply the parameters stored in your context to a " +"new instance of the model (as it happens each time a ``ClientApp`` is " +"executed). 
You will need to:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:266 +msgid "Deserialize each element in your specific ``ParametersRecord``" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:267 +msgid "Construct a ``state_dict`` and load it" msgstr "" -#: ../../source/how-to-configure-logging.rst:114 +#: ../../source/how-to-design-stateful-clients.rst:287 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." +"And that's it! Recall that even though this example shows how to store " +"the entire ``state_dict`` in a ``ParametersRecord``, you can just save " +"part of it. The process would be identical, but you might need to adjust " +"how it is loaded into an existing model using PyTorch APIs." msgstr "" -#: ../../source/how-to-configure-logging.rst:140 -msgid "Log to a remote service" +#: ../../source/how-to-design-stateful-clients.rst:293 +msgid "Saving Tensorflow/Keras parameters to the context" msgstr "" -#: ../../source/how-to-configure-logging.rst:142 +#: ../../source/how-to-design-stateful-clients.rst:295 msgid "" -"The ``fl.common.logger.configure`` function, also allows specifying a " -"host to which logs can be pushed (via ``POST``) through a native Python " -"``logging.handler.HTTPHandler``. This is a particularly useful feature in" -" ``gRPC``-based Federated Learning workloads where otherwise gathering " -"logs from all entities (i.e. the server and the clients) might be " -"cumbersome. Note that in Flower simulation, the server automatically " -"displays all logs. You can still specify a ``HTTPHandler`` should you " -"wish to backup or analyze the logs somewhere else." +"Follow the same steps as done above but replace the ``state_dict`` logic " +"with simply `get_weights() " +"`_" +" to convert the model parameters to a list of NumPy arrays that can then " +"be serialized into an ``Array``. 
Then, after deserialization, use " +"`set_weights() " +"`_" +" to apply the new parameters to a model." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:2 +#: ../../source/how-to-enable-tls-connections.rst:2 #, fuzzy -msgid "Enable SSL connections" +msgid "Enable TLS connections" msgstr "Collecte centralisée des données" -#: ../../source/how-to-enable-ssl-connections.rst:4 +#: ../../source/how-to-enable-tls-connections.rst:4 #, fuzzy msgid "" -"This guide describes how to a SSL-enabled secure Flower server " +"This guide describes how to a TLS-enabled secure Flower server " "(``SuperLink``) can be started and how a Flower client (``SuperNode``) " "can establish a secure connections to it." msgstr "" "Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " "comment un client Flower peut établir une connexion sécurisée avec lui." -#: ../../source/how-to-enable-ssl-connections.rst:8 +#: ../../source/how-to-enable-tls-connections.rst:8 #, fuzzy msgid "" "A complete code example demonstrating a secure connection can be found " @@ -6766,11 +5969,11 @@ msgstr "" "trouvé ici `_." -#: ../../source/how-to-enable-ssl-connections.rst:11 +#: ../../source/how-to-enable-tls-connections.rst:11 #, fuzzy msgid "" "The code example comes with a ``README.md`` file which explains how to " -"start it. Although it is already SSL-enabled, it might be less " +"start it. Although it is already TLS-enabled, it might be less " "descriptive on how it does so. Stick to this guide for a deeper " "introduction to the topic." msgstr "" @@ -6779,14 +5982,14 @@ msgstr "" "moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " "une introduction plus approfondie sur le sujet." 
-#: ../../source/how-to-enable-ssl-connections.rst:16 +#: ../../source/how-to-enable-tls-connections.rst:16 msgid "Certificates" msgstr "Certificats" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/how-to-enable-tls-connections.rst:18 #, fuzzy msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " +"Using TLS-enabled connections requires certificates to be passed to the " "server and client. For the purpose of this guide we are going to generate" " self-signed certificates. As this can become quite complex we are going " "to ask you to run the script in ``examples/advanced-" @@ -6800,7 +6003,7 @@ msgstr "" "d'exécuter le script dans :code:`examples/advanced-" "tensorflow/certificates/generate.sh`" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/how-to-enable-tls-connections.rst:29 #, fuzzy msgid "" "This will generate the certificates in ``examples/advanced-" @@ -6809,10 +6012,10 @@ msgstr "" "Cela générera les certificats dans :code:`examples/advanced-" "tensorflow/.cache/certificates`." -#: ../../source/how-to-enable-ssl-connections.rst:32 +#: ../../source/how-to-enable-tls-connections.rst:32 #, fuzzy msgid "" -"The approach for generating SSL certificates in the context of this " +"The approach for generating TLS certificates in the context of this " "example can serve as an inspiration and starting point, but it should not" " be used as a reference for production environments. Please refer to " "other sources regarding the issue of correctly generating certificates " @@ -6824,33 +6027,35 @@ msgstr "" "servir d'inspiration et de point de départ, mais ne doit pas être " "considérée comme complète pour les environnements de production." 
-#: ../../source/how-to-enable-ssl-connections.rst:40 +#: ../../source/how-to-enable-tls-connections.rst:40 #, fuzzy msgid "Server (SuperLink)" msgstr "flower-superlink" -#: ../../source/how-to-enable-ssl-connections.rst:42 +#: ../../source/how-to-enable-tls-connections.rst:42 #, fuzzy msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +"Navigate to the ``examples/advanced-tensorflow`` folder (`here " +"`_) and use the following terminal command to start a server " +"(SuperLink) that uses the previously generated certificates:" msgstr "" "Nous allons maintenant montrer comment écrire un client qui utilise les " "scripts générés précédemment :" -#: ../../source/how-to-enable-ssl-connections.rst:52 +#: ../../source/how-to-enable-tls-connections.rst:54 msgid "" "When providing certificates, the server expects a tuple of three " "certificates paths: CA certificate, server certificate and server private" " key." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/how-to-enable-tls-connections.rst:58 #, fuzzy -msgid "Client (SuperNode)" +msgid "Clients (SuperNode)" msgstr "Codes d'état du client." -#: ../../source/how-to-enable-ssl-connections.rst:58 +#: ../../source/how-to-enable-tls-connections.rst:60 #, fuzzy msgid "" "Use the following terminal command to start a client (SuperNode) that " @@ -6859,7 +6064,7 @@ msgstr "" "Nous allons maintenant montrer comment écrire un client qui utilise les " "scripts générés précédemment :" -#: ../../source/how-to-enable-ssl-connections.rst:67 +#: ../../source/how-to-enable-tls-connections.rst:71 #, fuzzy msgid "" "When setting ``root_certificates``, the client expects a file path to " @@ -6870,23 +6075,62 @@ msgstr "" "utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " "certificats sous forme de chaînes d'octets." 
-#: ../../source/how-to-enable-ssl-connections.rst:73 +#: ../../source/how-to-enable-tls-connections.rst:74 +#, fuzzy +msgid "" +"In another terminal, start a second SuperNode that uses the same " +"certificates:" +msgstr "" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" + +#: ../../source/how-to-enable-tls-connections.rst:84 +msgid "" +"Note that in the second SuperNode, if you run both on the same machine, " +"you must specify a different port for the ``ClientAppIO`` API address to " +"avoid clashing with the first SuperNode." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:89 +msgid "Executing ``flwr run`` with TLS" +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:91 +msgid "" +"The root certificates used for executing ``flwr run`` is specified in the" +" ``pyproject.toml`` of your app." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:100 +msgid "" +"Note that the path to the ``root-certificates`` is relative to the root " +"of the project. Now, you can run the example by executing the following:" +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:110 #, fuzzy msgid "" "You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"using the given script, start an TLS-enabled server and have two clients " +"establish secure connections to it. You should also have learned how to " +"run your Flower project using ``flwr run`` with TLS enabled." msgstr "" "Tu devrais maintenant avoir appris à générer des certificats auto-signés " "à l'aide du script donné, à démarrer un serveur compatible SSL et à " "demander à un client d'établir une connexion sécurisée avec lui." 
-#: ../../source/how-to-enable-ssl-connections.rst:78 +#: ../../source/how-to-enable-tls-connections.rst:117 +msgid "" +"For running a Docker setup with TLS enabled, please refer to :doc:`docker" +"/enable-tls`." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:121 #, fuzzy msgid "Additional resources" msgstr "Ressources supplémentaires" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-tls-connections.rst:123 msgid "" "These additional sources might be relevant if you would like to dive " "deeper into the topic of certificates:" @@ -6894,14 +6138,125 @@ msgstr "" "Ces sources supplémentaires peuvent être pertinentes si tu souhaites " "approfondir le sujet des certificats :" -#: ../../source/how-to-enable-ssl-connections.rst:83 +#: ../../source/how-to-enable-tls-connections.rst:126 msgid "`Let's Encrypt `_" msgstr "`Let's Encrypt `_" -#: ../../source/how-to-enable-ssl-connections.rst:84 +#: ../../source/how-to-enable-tls-connections.rst:127 msgid "`certbot `_" msgstr "`certbot `_" +#: ../../source/how-to-implement-fedbn.rst:2 +#, fuzzy +msgid "Implement FedBN" +msgstr "Mettre en place des stratégies" + +#: ../../source/how-to-implement-fedbn.rst:4 +#, fuzzy +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training method " +"designed for non-IID data. We are using PyTorch to train a Convolutional " +"Neural Network (with Batch Normalization layers) on the CIFAR-10 dataset." +" When applying FedBN, only minor changes are needed compared to " +":doc:`Quickstart PyTorch `." +msgstr "" +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante avec `FedBN `_, une stratégie" +" de formation fédérée conçue pour les données non-identifiées. 
Nous " +"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " +"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " +"Lors de l'application de FedBN, seules quelques modifications sont " +"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " +"fédération `_." + +#: ../../source/how-to-implement-fedbn.rst:12 +#, fuzzy +msgid "Model" +msgstr "Entraîne le modèle" + +#: ../../source/how-to-implement-fedbn.rst:14 +msgid "" +"A full introduction to federated learning with PyTorch and Flower can be " +"found in :doc:`Quickstart PyTorch `. This " +"how-to guide varies only a few details in ``task.py``. FedBN requires a " +"model architecture (defined in class ``Net()``) that uses Batch " +"Normalization layers:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:45 +msgid "" +"Try editing the model architecture, then run the project to ensure " +"everything still works:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:52 +msgid "" +"So far this should all look fairly familiar if you've used Flower with " +"PyTorch before." +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:55 +#, fuzzy +msgid "FedBN" +msgstr "DP-FedAvg" + +#: ../../source/how-to-implement-fedbn.rst:57 +msgid "" +"To adopt FedBN, only the ``get_parameters`` and ``set_parameters`` " +"functions in ``task.py`` need to be revised. FedBN only changes the " +"client-side by excluding batch normalization parameters from being " +"exchanged with the server." +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:61 +#, fuzzy +msgid "" +"We revise the *client* logic by changing ``get_parameters`` and " +"``set_parameters`` in ``task.py``. 
The batch normalization parameters are" +" excluded from model parameter list when sending to or receiving from the" +" server:" +msgstr "" +"Enfin, nous allons réviser notre logique *client* en modifiant " +":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " +"nous allons exclure les paramètres de normalisation des lots de la liste " +"des paramètres du modèle lors de l'envoi ou de la réception depuis le " +"serveur." + +#: ../../source/how-to-implement-fedbn.rst:90 +msgid "To test the new appraoch, run the project again:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:96 +msgid "" +"Your PyTorch project now runs federated learning with FedBN. " +"Congratulations!" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:99 +msgid "Next Steps" +msgstr "Prochaines étapes" + +#: ../../source/how-to-implement-fedbn.rst:101 +#, fuzzy +msgid "" +"The example is of course over-simplified since all clients load the exact" +" same dataset. This isn't realistic. You now have the tools to explore " +"this topic further. How about using different subsets of CIFAR-10 on each" +" client? How about adding more clients?" +msgstr "" +"Le code source complet de cet exemple se trouve ici " +"`_. Notre exemple est bien sûr un peu trop " +"simplifié parce que les deux clients chargent exactement le même ensemble" +" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " +"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " +"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " +"?" 
+ #: ../../source/how-to-implement-strategies.rst:2 #, fuzzy msgid "Implement strategies" @@ -7320,7 +6675,6 @@ msgid "Install stable release" msgstr "Installe la version stable" #: ../../source/how-to-install-flower.rst:14 -#: ../../source/how-to-upgrade-to-flower-next.rst:66 msgid "Using pip" msgstr "" @@ -7439,743 +6793,628 @@ msgstr "" "Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" "nightly`` doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-monitor-simulation.rst:2 +#: ../../source/how-to-run-simulations.rst:22 #, fuzzy -msgid "Monitor simulation" +msgid "Run simulations" msgstr "Simulation de moniteur" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/how-to-run-simulations.rst:24 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"Simulating Federated Learning workloads is useful for a multitude of use " +"cases: you might want to run your workload on a large cohort of clients " +"without having to source, configure, and manage a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without going through " +"a complex setup process; you might want to validate your algorithm in " +"different scenarios at varying levels of data and system heterogeneity, " +"client availability, privacy budgets, etc. These are among some of the " +"use cases where simulating FL workloads makes sense." msgstr "" -"Flower te permet de surveiller les ressources du système pendant " -"l'exécution de ta simulation. 
De plus, le moteur de simulation de Flower " -"est puissant et te permet de décider comment allouer les ressources par " -"manière de client et de limiter l'utilisation totale. Les informations " -"sur la consommation des ressources peuvent t'aider à prendre des " -"décisions plus intelligentes et à accélérer le temps d'exécution." -#: ../../source/how-to-monitor-simulation.rst:9 +#: ../../source/how-to-run-simulations.rst:33 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." +"Flower's ``Simulation Engine`` schedules, launches, and manages " +"|clientapp_link|_ instances. It does so through a ``Backend``, which " +"contains several workers (i.e., Python processes) that can execute a " +"``ClientApp`` by passing it a |context_link|_ and a |message_link|_. " +"These ``ClientApp`` objects are identical to those used by Flower's " +"`Deployment Engine `_, making " +"alternating between *simulation* and *deployment* an effortless process. " +"The execution of ``ClientApp`` objects through Flower's ``Simulation " +"Engine`` is:" msgstr "" -"Les instructions spécifiques supposent que tu utilises macOS et que le " -"gestionnaire de paquets `Homebrew `_ est installé." - -#: ../../source/how-to-monitor-simulation.rst:13 -msgid "Downloads" -msgstr "Téléchargements" -#: ../../source/how-to-monitor-simulation.rst:19 +#: ../../source/how-to-run-simulations.rst:41 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"**Resource-aware**: Each backend worker executing ``ClientApp``\\s gets " +"assigned a portion of the compute and memory on your system. You can " +"define these at the beginning of the simulation, allowing you to control " +"the degree of parallelism of your simulation. 
For a fixed total pool of " +"resources, the fewer the resources per backend worker, the more " +"``ClientApps`` can run concurrently on the same hardware." msgstr "" -"`Prometheus `_ est utilisé pour la collecte de " -"données, tandis que `Grafana `_ te permettra de " -"visualiser les données collectées. Ils sont tous deux bien intégrés à " -"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-monitor-simulation.rst:23 +#: ../../source/how-to-run-simulations.rst:46 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." +"**Batchable**: When there are more ``ClientApps`` to execute than backend" +" workers, ``ClientApps`` are queued and executed as soon as resources are" +" freed. This means that ``ClientApps`` are typically executed in batches " +"of N, where N is the number of backend workers." msgstr "" -"Écrase les fichiers de configuration (selon ton appareil, il se peut " -"qu'il soit installé sur un chemin différent)." -#: ../../source/how-to-monitor-simulation.rst:26 -msgid "If you are on an M1 Mac, it should be:" -msgstr "Si tu es sur un Mac M1, il devrait l'être :" - -#: ../../source/how-to-monitor-simulation.rst:33 -msgid "On the previous generation Intel Mac devices, it should be:" +#: ../../source/how-to-run-simulations.rst:50 +msgid "" +"**Self-managed**: This means that you, as a user, do not need to launch " +"``ClientApps`` manually; instead, the ``Simulation Engine``'s internals " +"orchestrates the execution of all ``ClientApp``\\s." msgstr "" -"Sur les appareils Mac Intel de la génération précédente, ce devrait être " -"le cas :" -#: ../../source/how-to-monitor-simulation.rst:40 +#: ../../source/how-to-run-simulations.rst:53 msgid "" -"Open the respective configuration files and change them. 
Depending on " -"your device, use one of the two following commands:" +"**Ephemeral**: This means that a ``ClientApp`` is only materialized when " +"it is required by the application (e.g., to do `fit() `_). The object is destroyed afterward, " +"releasing the resources it was assigned and allowing other clients to " +"participate." msgstr "" -"Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " -"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/how-to-monitor-simulation.rst:51 +#: ../../source/how-to-run-simulations.rst:60 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"You can preserve the state (e.g., internal variables, parts of an ML " +"model, intermediate results) of a ``ClientApp`` by saving it to its " +"``Context``. Check the `Designing Stateful Clients `_ guide for a complete walkthrough." msgstr "" -"puis supprime tout le texte du fichier et colle une nouvelle " -"configuration Prometheus que tu vois ci-dessous. Tu peux adapter les " -"intervalles de temps à tes besoins :" -#: ../../source/how-to-monitor-simulation.rst:67 +#: ../../source/how-to-run-simulations.rst:65 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"The ``Simulation Engine`` delegates to a ``Backend`` the role of spawning" +" and managing ``ClientApps``. The default backend is the ``RayBackend``, " +"which uses `Ray `_, an open-source framework for " +"scalable Python workloads. In particular, each worker is an `Actor " +"`_ capable of " +"spawning a ``ClientApp`` given its ``Context`` and a ``Message`` to " +"process." msgstr "" -"Maintenant, après avoir édité la configuration de Prometheus, fais de " -"même avec les fichiers de configuration de Grafana. 
Ouvre ces derniers à " -"l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/how-to-monitor-simulation.rst:78 -msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +#: ../../source/how-to-run-simulations.rst:73 +msgid "Launch your Flower simulation" msgstr "" -"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " -"configuration suivante comme précédemment." -#: ../../source/how-to-monitor-simulation.rst:94 +#: ../../source/how-to-run-simulations.rst:75 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +"Running a simulation is straightforward; in fact, it is the default mode " +"of operation for |flwr_run_link|_. Therefore, running Flower simulations " +"primarily requires you to first define a ``ClientApp`` and a " +"``ServerApp``. A convenient way to generate a minimal but fully " +"functional Flower app is by means of the |flwr_new_link|_ command. There " +"are multiple templates to choose from. The example below uses the " +"``PyTorch`` template." msgstr "" -"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " -"suivi des métriques, maintenant, démarrons-le." - -#: ../../source/how-to-monitor-simulation.rst:98 -msgid "Tracking metrics" -msgstr "Suivi des mesures" -#: ../../source/how-to-monitor-simulation.rst:100 +#: ../../source/how-to-run-simulations.rst:83 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"If you haven't already, install Flower via ``pip install -U flwr`` in a " +"Python environment." msgstr "" -"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " -"surveillance que tu viens d'installer et de configurer." 
-#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-run-simulations.rst:91 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"Then, follow the instructions shown after completing the |flwr_new_link|_" +" command. When you execute |flwr_run_link|_, you'll be using the " +"``Simulation Engine``." msgstr "" -"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " -"démarres une simulation." - -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "Now, you are ready to start your workload." -msgstr "Maintenant, tu es prêt à commencer ta charge de travail." -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-run-simulations.rst:94 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"If we take a look at the ``pyproject.toml`` that was generated from the " +"|flwr_new_link|_ command (and loaded upon |flwr_run_link|_ execution), we" +" see that a *default* federation is defined. It sets the number of " +"supernodes to 10." msgstr "" -"Peu de temps après le début de la simulation, tu devrais voir les " -"journaux suivants dans ton terminal :" -#: ../../source/how-to-monitor-simulation.rst:127 -#, fuzzy -msgid "You can look at everything at http://127.0.0.1:8265 ." -msgstr "Tu peux tout regarder sur ``_ ." - -#: ../../source/how-to-monitor-simulation.rst:129 +#: ../../source/how-to-run-simulations.rst:106 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +"You can modify the size of your simulations by adjusting ``options.num-" +"supernodes``." msgstr "" -"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" -" panneau de gauche, l'option la plus basse)." 
-#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-run-simulations.rst:109 +#, fuzzy +msgid "Simulation examples" +msgstr "Exemples de PyTorch" + +#: ../../source/how-to-run-simulations.rst:111 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"In addition to the quickstart tutorials in the documentation (e.g., " +"`quickstart PyTorch Tutorial `_, " +"`quickstart JAX Tutorial `_), most examples" +" in the Flower repository are simulation-ready." msgstr "" -"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " -"coin supérieur droit, \"View in Grafana\". Sache que le tableau de bord " -"Ray n'est accessible que pendant la simulation. Une fois la simulation " -"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" -" peux démarrer Grafana en te rendant sur `http://localhost:3000/``." -#: ../../source/how-to-monitor-simulation.rst:137 +#: ../../source/how-to-run-simulations.rst:116 #, fuzzy msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port ``3000`` on your" -" machine as long as they are running." +"`Quickstart TensorFlow/Keras " +"`_." msgstr "" -"Après avoir terminé la visualisation, arrête Prometheus et Grafana. C'est" -" important car sinon ils bloqueront, par exemple, le port :code:`3000` " -"sur ta machine tant qu'ils seront en cours d'exécution." 
- -#: ../../source/how-to-monitor-simulation.rst:147 -msgid "Resource allocation" -msgstr "Allocation des ressources" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/how-to-monitor-simulation.rst:149 +#: ../../source/how-to-run-simulations.rst:118 +#, fuzzy msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +"`Quickstart PyTorch `_" msgstr "" -"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " -"efficacement les ressources du système aux clients de simulation de ton " -"côté." +"`Quickstart PyTorch (Code) " +"`_" -#: ../../source/how-to-monitor-simulation.rst:152 +#: ../../source/how-to-run-simulations.rst:120 +#, fuzzy msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"`Advanced PyTorch `_" msgstr "" -"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " -"avec toutes les ressources disponibles sur le système, qu'elle partage " -"entre les clients. Cela ne signifie pas qu'elle les divise de manière " -"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " -"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " -"ce blog. 
Tu peux vérifier les ressources du système en exécutant ce qui " -"suit :" - -#: ../../source/how-to-monitor-simulation.rst:164 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" +"`Quickstart PyTorch (Code) " +"`_" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-run-simulations.rst:122 +#, fuzzy msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"`Quickstart MLX `_" msgstr "" -"Cependant, tu peux écraser les valeurs par défaut. Lorsque tu démarres " -"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " -"toutes) :" - -#: ../../source/how-to-monitor-simulation.rst:195 -msgid "Let’s also specify the resource for a single client." -msgstr "Spécifions également la ressource pour un seul client." +"`Quickstart PyTorch (Code) " +"`_" -#: ../../source/how-to-monitor-simulation.rst:225 +#: ../../source/how-to-run-simulations.rst:123 +#, fuzzy msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"`ViT fine-tuning `_" msgstr "" -"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " -"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" -" lorsque les ressources le permettront." +"`Quickstart PyTorch (Code) " +"`_" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-run-simulations.rst:125 #, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " -"two clients and therefore enable them to run concurrently. Be careful not" -" to require more resources than available. 
If you specified " -"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" -" GPUs but decided to set 1 in ``ray_init_args``)." +"The complete list of examples can be found in `the Flower GitHub " +"`_." msgstr "" -"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " -"ne fonctionneront pas simultanément. En définissant " -":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " -"les faire fonctionner simultanément. Fais attention à ne pas demander " -"plus de ressources que celles disponibles. Si tu as spécifié " -":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " -"as 2 GPU mais que tu as décidé d'en définir 1 dans " -":code:`ray_init_args`)." - -#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "FAQ" +"Un exemple de code complet démontrant une connexion sécurisée peut être " +"trouvé ici `_." -#: ../../source/how-to-monitor-simulation.rst:237 -msgid "Q: I don't see any metrics logged." -msgstr "Q : Je ne vois aucune mesure enregistrée." +#: ../../source/how-to-run-simulations.rst:131 +msgid "Defining ``ClientApp`` resources" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:239 +#: ../../source/how-to-run-simulations.rst:133 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +"By default, the ``Simulation Engine`` assigns two CPU cores to each " +"backend worker. This means that if your system has 10 CPU cores, five " +"backend workers can be running in parallel, each executing a different " +"``ClientApp`` instance." msgstr "" -"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" -" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " -"défaut). 
Modifie le délai pour qu'il corresponde à la période pendant " -"laquelle la simulation s'est déroulée." -#: ../../source/how-to-monitor-simulation.rst:243 +#: ../../source/how-to-run-simulations.rst:137 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"More often than not, you would probably like to adjust the resources your" +" ``ClientApp`` gets assigned based on the complexity (i.e., compute and " +"memory footprint) of your workload. You can do so by adjusting the " +"backend resources for your federation." msgstr "" -"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " -"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" -" l'onglet Métriques dans Ray Dashboard." -#: ../../source/how-to-monitor-simulation.rst:246 +#: ../../source/how-to-run-simulations.rst:143 +#, python-format msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +"Note that the resources the backend assigns to each worker (and hence to " +"each ``ClientApp`` being executed) are assigned in a *soft* manner. This " +"means that the resources are primarily taken into account in order to " +"control the degree of parallelism at which ``ClientApp`` instances should" +" be executed. Resource assignment is **not strict**, meaning that if you " +"specified your ``ClientApp`` is assumed to make use of 25% of the " +"available VRAM but it ends up using 50%, it might cause other " +"``ClientApp`` instances to crash throwing an out-of-memory (OOM) error." msgstr "" -"R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " -"services en cours d'exécution" -#: ../../source/how-to-monitor-simulation.rst:252 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:151 msgid "" -"Q: I see \"This site can't be reached\" when going to " -"http://127.0.0.1:8265." 
+"Customizing resources can be done directly in the ``pyproject.toml`` of " +"your app." msgstr "" -"Q : Je vois \"This site can't be reached\" quand je vais sur " -"``_." -#: ../../source/how-to-monitor-simulation.rst:254 +#: ../../source/how-to-run-simulations.rst:160 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." +"With the above backend settings, your simulation will run as many " +"``ClientApps`` in parallel as CPUs you have in your system. GPU resources" +" for your ``ClientApp`` can be assigned by specifying the **ratio** of " +"VRAM each should make use of." msgstr "" -"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " -"Prometheus." - -#: ../../source/how-to-monitor-simulation.rst:257 -msgid "Resources" -msgstr "Ressources" -#: ../../source/how-to-monitor-simulation.rst:259 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:173 msgid "" -"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" -"started.html" +"If you are using TensorFlow, you need to `enable memory growth " +"`_ so " +"multiple ``ClientApp`` instances can share a GPU. This needs to be done " +"before launching the simulation. To do so, set the environment variable " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"``." msgstr "" -"Tableau de bord Ray : ``_" -#: ../../source/how-to-monitor-simulation.rst:261 -#, fuzzy -msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +#: ../../source/how-to-run-simulations.rst:179 +msgid "" +"Let's see how the above configuration results in a different number of " +"``ClientApps`` running in parallel depending on the resources available " +"in your system. 
If your system has:" msgstr "" -"Ray Metrics : ``_" - -#: ../../source/how-to-run-simulations.rst:2 -#, fuzzy -msgid "Run simulations" -msgstr "Simulation de moniteur" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/how-to-run-simulations.rst:183 +#, python-format msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"10x CPUs and 1x GPU: at most 4 ``ClientApps`` will run in parallel since " +"each requires 25% of the available VRAM." msgstr "" -#: ../../source/how-to-run-simulations.rst:19 +#: ../../source/how-to-run-simulations.rst:185 msgid "" -"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " -"clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -"``VirtualClientEngine`` are:" +"10x CPUs and 2x GPUs: at most 8 ``ClientApps`` will run in parallel " +"(VRAM-limited)." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:26 +#: ../../source/how-to-run-simulations.rst:186 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"6x CPUs and 4x GPUs: at most 6 ``ClientApps`` will run in parallel (CPU-" +"limited)." msgstr "" -#: ../../source/how-to-run-simulations.rst:31 +#: ../../source/how-to-run-simulations.rst:187 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to ``VirtualClientEngine``'s " -"internals." +"10x CPUs but 0x GPUs: you won't be able to run the simulation since not " +"even the resources for a single ``ClientApp`` can be met." msgstr "" -#: ../../source/how-to-run-simulations.rst:33 +#: ../../source/how-to-run-simulations.rst:190 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"A generalization of this is given by the following equation. It gives the" +" maximum number of ``ClientApps`` that can be executed in parallel on " +"available CPU cores (SYS_CPUS) and VRAM (SYS_GPUS)." msgstr "" -#: ../../source/how-to-run-simulations.rst:38 +#: ../../source/how-to-run-simulations.rst:194 msgid "" -"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " -"`Actors `_ to spawn " -"`virtual` clients and run their workload." 
+"N = \\min\\left(\\left\\lfloor \\frac{\\text{SYS_CPUS}}{\\text{num_cpus}}" +" \\right\\rfloor, \\left\\lfloor " +"\\frac{\\text{SYS_GPUS}}{\\text{num_gpus}} \\right\\rfloor\\right)" msgstr "" -#: ../../source/how-to-run-simulations.rst:45 -msgid "Launch your Flower simulation" +#: ../../source/how-to-run-simulations.rst:198 +msgid "" +"Both ``num_cpus`` (an integer higher than 1) and ``num_gpus`` (a non-" +"negative real number) should be set on a per ``ClientApp`` basis. If, for" +" example, you want only a single ``ClientApp`` to run on each GPU, then " +"set ``num_gpus=1.0``. If, for example, a ``ClientApp`` requires access to" +" two whole GPUs, you'd set ``num_gpus=2``." msgstr "" -#: ../../source/how-to-run-simulations.rst:47 +#: ../../source/how-to-run-simulations.rst:203 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"While the ``options.backend.client-resources`` can be used to control the" +" degree of concurrency in your simulations, this does not stop you from " +"running hundreds or even thousands of clients in the same round and " +"having orders of magnitude more *dormant* (i.e., not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The ``Simulation " +"Engine`` will schedule 100 ``ClientApps`` to run and then will execute " +"them in a resource-aware manner in batches of 8." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:73 +#: ../../source/how-to-run-simulations.rst:212 #, fuzzy -msgid "VirtualClientEngine resources" +msgid "Simulation Engine resources" msgstr "Moteur de client virtuel" -#: ../../source/how-to-run-simulations.rst:75 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -"``ray_init_args`` input argument to ``start_simulation`` which the VCE " -"internally passes to Ray's ``ray.init`` command. For a complete list of " -"settings you can configure check the `ray.init " -"`_" -" documentation. Do not set ``ray_init_args`` if you want the VCE to use " -"all your system's CPUs and GPUs." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:97 -msgid "Assigning client resources" -msgstr "" - -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/how-to-run-simulations.rst:214 msgid "" -"By default the ``VirtualClientEngine`` assigns a single CPU core (and " -"nothing else) to each virtual client. This means that if your system has " -"10 cores, that many virtual clients can be concurrently running." +"By default, the ``Simulation Engine`` has **access to all system " +"resources** (i.e., all CPUs, all GPUs). However, in some settings, you " +"might want to limit how many of your system resources are used for " +"simulation. You can do this in the ``pyproject.toml`` of your app by " +"setting the ``options.backend.init_args`` variable." msgstr "" -#: ../../source/how-to-run-simulations.rst:103 +#: ../../source/how-to-run-simulations.rst:228 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. 
You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" -msgstr "" - -#: ../../source/how-to-run-simulations.rst:110 -msgid "``num_cpus`` indicates the number of CPU cores a client would get." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:111 -msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:113 -msgid "Let's see a few examples:" +"With the above setup, the Backend will be initialized with a single CPU " +"and GPU. Therefore, even if more CPUs and GPUs are available in your " +"system, they will not be used for the simulation. The example above " +"results in a single ``ClientApp`` running at any given point." msgstr "" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:233 msgid "" -"While the ``client_resources`` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " -"client sampled by the strategy) and then will execute them in a resource-" -"aware manner in batches of 8." +"For a complete list of settings you can configure, check the `ray.init " +"`_" +" documentation." msgstr "" -#: ../../source/how-to-run-simulations.rst:140 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." 
+#: ../../source/how-to-run-simulations.rst:236 +msgid "For the highest performance, do not set ``options.backend.init_args``." msgstr "" -#: ../../source/how-to-run-simulations.rst:145 +#: ../../source/how-to-run-simulations.rst:239 #, fuzzy -msgid "Simulation examples" -msgstr "Exemples de PyTorch" +msgid "Simulation in Colab/Jupyter" +msgstr "Simulation de moniteur" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:241 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"The preferred way of running simulations should always be " +"|flwr_run_link|_. However, the core functionality of the ``Simulation " +"Engine`` can be used from within a Google Colab or Jupyter environment by" +" means of `run_simulation `_." msgstr "" -#: ../../source/how-to-run-simulations.rst:151 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:262 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +"With ``run_simulation``, you can also control the amount of resources for" +" your ``ClientApp`` instances. Do so by setting ``backend_config``. If " +"unset, the default resources are assigned (i.e., 2xCPUs per ``ClientApp``" +" and no GPU)." msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" -#: ../../source/how-to-run-simulations.rst:154 +#: ../../source/how-to-run-simulations.rst:273 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"Refer to the `30 minutes Federated AI Tutorial " +"`_ for a complete example on how to " +"run Flower Simulations in Colab." 
msgstr ""

-#: ../../source/how-to-run-simulations.rst:159
+#: ../../source/how-to-run-simulations.rst:280
 #, fuzzy
 msgid "Multi-node Flower simulations"
 msgstr "Simulation de moniteur"

-#: ../../source/how-to-run-simulations.rst:161
+#: ../../source/how-to-run-simulations.rst:282
 msgid ""
-"Flower's ``VirtualClientEngine`` allows you to run FL simulations across "
-"multiple compute nodes. Before starting your multi-node simulation ensure"
-" that you:"
+"Flower's ``Simulation Engine`` allows you to run FL simulations across "
+"multiple compute nodes so that you're not restricted to running "
+"simulations on a *single* machine. Before starting your multi-node "
+"simulation, ensure that you:"
 msgstr ""

-#: ../../source/how-to-run-simulations.rst:164
-msgid "Have the same Python environment in all nodes."
-msgstr ""
-
-#: ../../source/how-to-run-simulations.rst:165
-msgid "Have a copy of your code (e.g. your entire repo) in all nodes."
+#: ../../source/how-to-run-simulations.rst:286
+msgid "Have the same Python environment on all nodes."
 msgstr ""

-#: ../../source/how-to-run-simulations.rst:166
-msgid ""
-"Have a copy of your dataset in all nodes (more about this in "
-":ref:`simulation considerations `)"
+#: ../../source/how-to-run-simulations.rst:287
+msgid "Have a copy of your code on all nodes."
 msgstr ""

-#: ../../source/how-to-run-simulations.rst:168
+#: ../../source/how-to-run-simulations.rst:288
 msgid ""
-"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the "
-"``VirtualClientEngine`` attaches to a running Ray instance."
+"Have a copy of your dataset on all nodes. If you are using partitions "
+"from `Flower Datasets `_, ensure the "
+"partitioning strategy and its parameterization are the same. The expectation "
+"is that the i-th dataset partition is identical in all nodes."
msgstr "" -#: ../../source/how-to-run-simulations.rst:171 +#: ../../source/how-to-run-simulations.rst:292 msgid "" -"Start Ray on you head node: on the terminal type ``ray start --head``. " +"Start Ray on your head node: on the terminal, type ``ray start --head``. " "This command will print a few lines, one of which indicates how to attach" " other nodes to the head node." msgstr "" -#: ../../source/how-to-run-simulations.rst:174 +#: ../../source/how-to-run-simulations.rst:295 msgid "" "Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -"``ray start --address='192.168.1.132:6379'``" +"starting the head and execute it on the terminal of a new node (before " +"executing |flwr_run_link|_). For example: ``ray start " +"--address='192.168.1.132:6379'``. Note that to be able to attach nodes to" +" the head node they should be discoverable by each other." msgstr "" -#: ../../source/how-to-run-simulations.rst:178 +#: ../../source/how-to-run-simulations.rst:300 msgid "" "With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"would if the simulation were running on a single node. In other words:" msgstr "" -#: ../../source/how-to-run-simulations.rst:181 +#: ../../source/how-to-run-simulations.rst:308 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command ``ray stop`` in each node's terminal " -"(including the head node)." +"Once your simulation is finished, if you'd like to dismantle your " +"cluster, you simply need to run the command ``ray stop`` in each node's " +"terminal (including the head node)." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:185 -msgid "Multi-node simulation good-to-know" +#: ../../source/how-to-run-simulations.rst:313 +msgid "" +"When attaching a new node to the head, all its resources (i.e., all CPUs," +" all GPUs) will be visible by the head node. This means that the " +"``Simulation Engine`` can schedule as many ``ClientApp`` instances as " +"that node can possibly run. In some settings, you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"``--num-cpus=`` and/or ``--num-" +"gpus=`` in any ``ray start`` command (including when " +"starting the head)." msgstr "" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-run-simulations.rst:322 +#, fuzzy +msgid "FAQ for Simulations" +msgstr "Simulation de moniteur" + +#: ../../source/how-to-run-simulations.rst +msgid "Can I make my ``ClientApp`` instances stateful?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:326 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"Yes. Use the ``state`` attribute of the |context_link|_ object that is " +"passed to the ``ClientApp`` to save variables, parameters, or results to " +"it. Read the `Designing Stateful Clients `_ guide for a complete walkthrough." msgstr "" -#: ../../source/how-to-run-simulations.rst:189 +#: ../../source/how-to-run-simulations.rst +msgid "Can I run multiple simulations on the same machine?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:330 msgid "" -"User ``ray status`` to check all nodes connected to your head node as " -"well as the total resources available to the ``VirtualClientEngine``." +"Yes, but bear in mind that each simulation isn't aware of the resource " +"usage of the other. If your simulations make use of GPUs, consider " +"setting the ``CUDA_VISIBLE_DEVICES`` environment variable to make each " +"simulation use a different set of the available GPUs. 
Export such an " +"environment variable before starting |flwr_run_link|_." msgstr "" -#: ../../source/how-to-run-simulations.rst:192 +#: ../../source/how-to-run-simulations.rst msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -"``VirtualClientEngine`` can schedule as many `virtual` clients as that " -"node can possible run. In some settings you might want to exclude certain" -" resources from the simulation. You can do this by appending `--num-" -"cpus=` and/or `--num-gpus=` in " -"any ``ray start`` command (including when starting the head)" +"Do the CPU/GPU resources set for each ``ClientApp`` restrict how much " +"compute/memory these make use of?" msgstr "" -#: ../../source/how-to-run-simulations.rst:202 -#, fuzzy -msgid "Considerations for simulations" -msgstr "Simulation de moniteur" +#: ../../source/how-to-run-simulations.rst:334 +msgid "" +"No. These resources are exclusively used by the simulation backend to " +"control how many workers can be created on startup. Let's say N backend " +"workers are launched, then at most N ``ClientApp`` instances will be " +"running in parallel. It is your responsibility to ensure ``ClientApp`` " +"instances have enough resources to execute their workload (e.g., fine-" +"tune a transformer model)." +msgstr "" + +#: ../../source/how-to-run-simulations.rst +msgid "My ``ClientApp`` is triggering OOM on my GPU. What should I do?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:206 +#: ../../source/how-to-run-simulations.rst:338 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +"It is likely that your `num_gpus` setting, which controls the number of " +"``ClientApp`` instances that can share a GPU, is too low (meaning too " +"many ``ClientApps`` share the same GPU). 
Try the following:" msgstr "" -#: ../../source/how-to-run-simulations.rst:209 +#: ../../source/how-to-run-simulations.rst:340 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"Set your ``num_gpus=1``. This will make a single ``ClientApp`` run on a " +"GPU." msgstr "" -#: ../../source/how-to-run-simulations.rst:217 -#, fuzzy -msgid "GPU resources" -msgstr "Ressources" +#: ../../source/how-to-run-simulations.rst:341 +msgid "Inspect how much VRAM is being used (use ``nvidia-smi`` for this)." +msgstr "" -#: ../../source/how-to-run-simulations.rst:219 +#: ../../source/how-to-run-simulations.rst:342 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -"``num_gpus`` in ``client_resources``. This being said, Ray (used " -"internally by the VCE) is by default:" +"Based on the VRAM you see your single ``ClientApp`` using, calculate how " +"many more would fit within the remaining VRAM. One divided by the total " +"number of ``ClientApps`` is the ``num_gpus`` value you should set." msgstr "" -#: ../../source/how-to-run-simulations.rst:222 -msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set ``num_gpus=0.5`` and you have two GPUs in your system with different" -" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " -"concurrently." +#: ../../source/how-to-run-simulations.rst:344 +msgid "Refer to :ref:`clientappresources` for more details." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:225 +#: ../../source/how-to-run-simulations.rst:346 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"If your ``ClientApp`` is using TensorFlow, make sure you are exporting " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"`` before starting your simulation. For " +"more details, check." msgstr "" -#: ../../source/how-to-run-simulations.rst:228 +#: ../../source/how-to-run-simulations.rst msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"How do I know what's the right ``num_cpus`` and ``num_gpus`` for my " +"``ClientApp``?" msgstr "" -#: ../../source/how-to-run-simulations.rst:231 +#: ../../source/how-to-run-simulations.rst:350 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." +"A good practice is to start by running the simulation for a few rounds " +"with higher ``num_cpus`` and ``num_gpus`` than what is really needed " +"(e.g., ``num_cpus=8`` and, if you have a GPU, ``num_gpus=1``). Then " +"monitor your CPU and GPU utilization. For this, you can make use of tools" +" such as ``htop`` and ``nvidia-smi``. If you see overall resource " +"utilization remains low, try lowering ``num_cpus`` and ``num_gpus`` " +"(recall this will make more ``ClientApp`` instances run in parallel) " +"until you see a satisfactory system resource utilization." msgstr "" -#: ../../source/how-to-run-simulations.rst:235 +#: ../../source/how-to-run-simulations.rst:352 msgid "" -"In addition, the GPU resource limits passed to ``client_resources`` are " -"not `enforced` (i.e. 
they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"Note that if the workload on your ``ClientApp`` instances is not " +"homogeneous (i.e., some come with a larger compute or memory footprint), " +"you'd probably want to focus on those when coming up with a good value " +"for ``num_gpus`` and ``num_cpus``." msgstr "" -#: ../../source/how-to-run-simulations.rst:240 -#, fuzzy -msgid "TensorFlow with GPUs" -msgstr "Exemples de TensorFlow" +#: ../../source/how-to-run-simulations.rst +msgid "Can I assign different resources to each ``ClientApp`` instance?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:242 +#: ../../source/how-to-run-simulations.rst:356 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"No. All ``ClientApp`` objects are assumed to make use of the same " +"``num_cpus`` and ``num_gpus``. When setting these values (refer to " +":ref:`clientappresources` for more details), ensure the ``ClientApp`` " +"with the largest memory footprint (either RAM or VRAM) can run in your " +"system with others like it in parallel." msgstr "" -#: ../../source/how-to-run-simulations.rst:249 +#: ../../source/how-to-run-simulations.rst msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " -"order to specify a function to be executed upon actor initialization. In " -"this case, to enable GPU growth for TF workloads. 
It would look as " -"follows:" +"Can I run a single simulation across multiple compute nodes (e.g. GPU " +"servers)?" msgstr "" -#: ../../source/how-to-run-simulations.rst:272 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:360 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"Yes. If you are using the ``RayBackend`` (the *default* backend) you can " +"first interconnect your nodes through Ray's cli and then launch the " +"simulation. Refer to :ref:`multinodesimulations` for a step-by-step " +"guide." msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" -#: ../../source/how-to-run-simulations.rst:276 -msgid "Multi-node setups" +#: ../../source/how-to-run-simulations.rst +msgid "" +"My ``ServerApp`` also needs to make use of the GPU (e.g., to do " +"evaluation of the *global model* after aggregation). Is this GPU usage " +"taken into account by the ``Simulation Engine``?" msgstr "" -#: ../../source/how-to-run-simulations.rst:278 +#: ../../source/how-to-run-simulations.rst:364 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"No. The ``Simulation Engine`` only manages ``ClientApps`` and therefore " +"is only aware of the system resources they require. If your ``ServerApp``" +" makes use of substantial compute or memory resources, factor that into " +"account when setting ``num_cpus`` and ``num_gpus``." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:286 +#: ../../source/how-to-run-simulations.rst +msgid "" +"Can I indicate on what resource a specific instance of a ``ClientApp`` " +"should run? Can I do resource placement?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:368 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Currently, the placement of ``ClientApp`` instances is managed by the " +"``RayBackend`` (the only backend available as of ``flwr==1.13.0``) and " +"cannot be customized. Implementing a *custom* backend would be a way of " +"achieving resource placement." 
msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:2 #, fuzzy -msgid "Save and load model checkpoints" +msgid "Save and Load Model Checkpoints" msgstr "Sauvegarde et chargement des points de contrôle PyTorch" #: ../../source/how-to-save-and-load-model-checkpoints.rst:4 @@ -8187,7 +7426,7 @@ msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:8 #, fuzzy -msgid "Model checkpointing" +msgid "Model Checkpointing" msgstr "Point de contrôle du modèle" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 @@ -8215,12 +7454,12 @@ msgstr "" "retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " "(c'est-à-dire le serveur) :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:58 #, fuzzy -msgid "Save and load PyTorch checkpoints" +msgid "Save and Load PyTorch Checkpoints" msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:60 #, fuzzy msgid "" "Similar to the previous example but with a few extra steps, we'll show " @@ -8238,7 +7477,7 @@ msgstr "" "transformés en ``state_dict`` PyTorch en suivant la structure de la " "classe ``OrderedDict``." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:103 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" @@ -8248,17 +7487,38 @@ msgstr "" "à ton code. 
Note que cela va itérer sur tous les points de contrôle " "sauvegardés et charger le plus récent :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:116 msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:119 +msgid "" +"Alternatively, we can save and load the model updates during the " +"evaluation phase by overriding ``evaluate()`` or ``aggregate_evaluate()``" +" method of the strategy (``FedAvg``). Check out the details in `Advanced " +"PyTorch Example `_ and `Advanced TensorFlow Example " +"`_." +msgstr "" + #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" msgstr "Passe à Flower 1.0" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:6 +msgid "" +"This guide is for users who have already worked with Flower 0.x and want " +"to upgrade to Flower 1.0. Newer versions of Flower (1.13 and later) are " +"based on a new architecture and not covered in this guide. After " +"upgrading Flower 0.x projects to Flower 1.0, please refer to " +":doc:`Upgrade to Flower 1.13 ` to make " +"your project compatible with the latest version of Flower." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:13 msgid "" "Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" " foundation for future growth. Compared to Flower 0.19 (and other 0.x " @@ -8271,12 +7531,12 @@ msgstr "" "changements qui nécessitent de modifier le code des projets de la série " "0.x existants." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 -#: ../../source/how-to-upgrade-to-flower-next.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:49 msgid "Install update" msgstr "Installer la mise à jour" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:21 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" @@ -8284,11 +7544,11 @@ msgstr "" "Voici comment mettre à jour une installation existante vers Flower 1.0 en" " utilisant soit pip soit Poetry :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "pip: add ``-U`` when installing." msgstr "pip : ajoute ``-U`` lors de l'installation." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:25 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" @@ -8296,7 +7556,7 @@ msgstr "" "``python -m pip install -U flwr`` (lors de l'utilisation de " "``start_server`` et ``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" @@ -8304,7 +7564,7 @@ msgstr "" "``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de" " ``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " @@ -8314,13 +7574,13 @@ msgstr "" "puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " "poetry.lock`` avant d'exécuter ``poetry install``)." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:32 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" "``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " "``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" @@ -8328,22 +7588,22 @@ msgstr "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " "l'utilisation de ``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 -#: ../../source/how-to-upgrade-to-flower-next.rst:121 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:88 msgid "Required changes" msgstr "Changements nécessaires" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "The following breaking changes require manual updates." msgstr "" "Les changements de rupture suivants nécessitent des mises à jour " "manuelles." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:42 msgid "General" msgstr "Généralités" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:44 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). " "Here's an example:" @@ -8351,7 +7611,7 @@ msgstr "" "Passe tous les arguments comme des arguments de mots-clés (et non comme " "des arguments de position). 
Voici un exemple :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" @@ -8359,7 +7619,7 @@ msgstr "" "Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," " FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " @@ -8369,12 +7629,12 @@ msgstr "" "``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "Client" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" @@ -8382,7 +7642,7 @@ msgstr "" "Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " "en ``def get_parameters(self, config):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" @@ -8390,11 +7650,11 @@ msgstr "" "Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " "``def get_parameters(self, ins : GetParametersIns):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "Stratégies / ``démarrer_serveur`` / ``démarrer_simulation``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 msgid "" 
"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " "``start_simulation``. Here's an example:" @@ -8402,7 +7662,7 @@ msgstr "" "Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" " ``start_simulation``. Voici un exemple :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" @@ -8410,7 +7670,7 @@ msgstr "" "Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " "\"round_timeout\" : 600.0}, ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " @@ -8420,7 +7680,7 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:70 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" @@ -8428,7 +7688,7 @@ msgstr "" "Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " "``config=ServerConfig(...)`` (voir point précédent)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:72 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. Distributed evaluation on all clients can be enabled by" @@ -8440,19 +7700,19 @@ msgstr "" "activée en configurant la stratégie pour échantillonner tous les clients " "pour l'évaluation après le dernier tour de formation." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:75 msgid "Rename parameter/ndarray conversion functions:" msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:78 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "``Poids_à_paramètres`` --> ``Réseaux_à_paramètres``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -8469,23 +7729,23 @@ msgstr "" "stratégie) doivent maintenant initialiser manuellement FedAvg avec " "``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "``min_eval_clients` --> ``min_evaluate_clients``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:92 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " @@ -8495,11 +7755,11 @@ msgstr "" "méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," " ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:95 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "Ajoute ``server_round`` et ``config`` à `evaluate_fn`` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:97 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8507,7 +7767,7 @@ msgstr "" "Flower 0.19 : ``def evaluate(parameters : NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:99 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " @@ -8517,11 +7777,11 @@ msgstr "" "config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "Custom strategies" msgstr "Stratégies personnalisées" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -8534,7 +7794,7 @@ msgstr "" "``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " "BaseException]]`` (dans ``aggregate_evaluate``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" @@ -8542,7 +7802,7 @@ msgstr "" "La méthode ``Stratégie`` `évaluer`` reçoit maintenant le cycle actuel " "d'apprentissage/évaluation fédéré comme premier paramètre :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:112 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8550,7 +7810,7 @@ msgstr "" "Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8558,11 +7818,11 @@ msgstr "" "Flower 1.0 : ``def evaluate(self, server_round : int, parameters : " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:118 msgid "Optional improvements" msgstr "Améliorations facultatives" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:120 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" @@ -8571,7 +7831,7 @@ msgstr "" "certain nombre d'améliorations potentielles qui viennent d'être rendues " "possibles :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " @@ -8583,7 +7843,7 @@ msgstr "" "serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " "sont plus nécessaires." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:126 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " @@ -8593,12 +7853,12 @@ msgstr "" "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " "round_timeout=600.0), ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 -#: ../../source/how-to-upgrade-to-flower-next.rst:349 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:130 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:451 msgid "Further help" msgstr "Aide supplémentaire" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:132 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -8613,186 +7873,241 @@ msgstr "" "Flower `_ et utilise le canal " "``#questions``." -#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:2 #, fuzzy -msgid "Upgrade to Flower Next" +msgid "Upgrade to Flower 1.13" msgstr "Passe à Flower 1.0" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:4 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " +"Welcome to the migration guide for updating Flower to Flower 1.13! " "Whether you're a seasoned user or just getting started, this guide will " "help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +" latest features and improvements in Flower 1.13." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:11 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:10 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. 
In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"This guide shows how to make pre-``1.13`` Flower code compatible with " +"Flower 1.13 (and later) with only minimal code changes." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:13 msgid "Let's dive in!" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:51 #, fuzzy msgid "" -"Here's how to update an existing installation of Flower to Flower Next " +"Here's how to update an existing installation of Flower to Flower 1.13 " "with ``pip``:" msgstr "" "Voici comment mettre à jour une installation existante vers Flower 1.0 en" " utilisant soit pip soit Poetry :" -#: ../../source/how-to-upgrade-to-flower-next.rst:74 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:57 +msgid "or if you need Flower 1.13 with simulation:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:63 msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:73 msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:101 -#, fuzzy -msgid "Using Poetry" -msgstr "Utiliser la poésie (recommandé)" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:90 +msgid "" +"Starting with Flower 1.8, the *infrastructure* and *application layers* " +"have been decoupled. Flower 1.13 enforces this separation further. Among " +"other things, this allows you to run the exact same code in a simulation " +"as in a real deployment." 
+msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:103 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.13.rst:94 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"Instead of starting a client in code via ``start_client()``, you create a" +" |clientapp_link|_. Instead of starting a server in code via " +"``start_server()``, you create a |serverapp_link|_. Both ``ClientApp`` " +"and ``ServerApp`` are started by the long-running components of the " +"server and client: the `SuperLink` and `SuperNode`, respectively." msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." -#: ../../source/how-to-upgrade-to-flower-next.rst:106 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.13.rst:102 msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." +"For more details on SuperLink and SuperNode, please see the " +"|flower_architecture_link|_ ." +msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:105 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. 
The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"The following non-breaking changes require manual updates and allow you " +"to run your project both in the traditional (now deprecated) way and in " +"the new (recommended) Flower 1.13 way:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:110 #, fuzzy msgid "|clientapp_link|_" msgstr "client" -#: ../../source/how-to-upgrade-to-flower-next.rst:134 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:112 msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"via ``start_client()``. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:146 #, fuzzy msgid "|serverapp_link|_" msgstr "serveur" -#: ../../source/how-to-upgrade-to-flower-next.rst:159 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:148 msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"the server via ``start_server()``. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:180 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:185 msgid "Deployment" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:182 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:187 +msgid "" +"In a terminal window, start the SuperLink using |flower_superlink_link|_." +" Then, in two additional terminal windows, start two SuperNodes using " +"|flower_supernode_link|_ (2x). There is no need to directly run " +"``client.py`` and ``server.py`` as Python scripts." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:190 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"Here's an example to start the server without HTTPS (insecure mode, only " +"for prototyping):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:185 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:195 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"For a comprehensive walk-through on how to deploy Flower using Docker, " +"please refer to the :doc:`docker/index` guide." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:218 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"Here's another example to start both SuperLink and SuperNodes with HTTPS." +" Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile``" +" command line options to pass paths to (CA certificate, server " +"certificate, and server private key)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:229 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:246 #, fuzzy -msgid "Simulation in CLI" +msgid "Simulation (CLI)" msgstr "Simulation de moniteur" -#: ../../source/how-to-upgrade-to-flower-next.rst:231 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:248 msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"|serverapp_link|_, respectively. There is no need to use " +"``start_simulation()`` anymore. 
Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:253 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:389 +msgid "" +"For a comprehensive guide on how to setup and run Flower simulations " +"please read the |flower_how_to_run_simulations_link|_ guide." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:310 +msgid "Depending on your Flower version, you can run your simulation as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:312 +msgid "" +"For Flower 1.11 and later, run ``flwr run`` in the terminal. This is the " +"recommended way to start simulations, other ways are deprecated and no " +"longer recommended." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:264 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:314 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"DEPRECATED For Flower versions between 1.8 and 1.10, run ``flower-" +"simulation`` in the terminal and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +" In the code snippet below, there is an example (assuming the " +"``server_app`` and ``client_app`` objects are in a ``sim.py`` module)." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:318 +msgid "DEPRECATED For Flower versions before 1.8, run the Python script directly." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:337 +msgid "" +"Depending on your Flower version, you can also define the default " +"resources as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:339 +msgid "" +"For Flower 1.11 and later, you can edit your ``pyproject.toml`` file and " +"then run ``flwr run`` in the terminal as shown in the example below." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:341 +msgid "" +"DEPRECATED For Flower versions between 1.8 and 1.10, you can adjust the " +"resources for each |clientapp_link|_ using the ``--backend-config`` " +"command line argument instead of setting the ``client_resources`` " +"argument in ``start_simulation()``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:281 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:344 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:384 +msgid "" +"DEPRECATED For Flower versions before 1.8, you need to run " +"``start_simulation()`` and pass a dictionary of the required resources to" +" the ``client_resources`` argument." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:375 +#, fuzzy +msgid "Simulation (Notebook)" +msgstr "Simulation de moniteur" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:377 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"To run your simulation from within a notebook, please consider the " +"following examples depending on your Flower version:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:305 -msgid "Simulation in a Notebook" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:380 +msgid "" +"For Flower 1.11 and later, you need to run |runsim_link|_ in your " +"notebook instead of ``start_simulation()``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:307 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:382 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"DEPRECATED For Flower versions between 1.8 and 1.10, you need to run " +"|runsim_link|_ in your notebook instead of ``start_simulation()`` and " +"configure the resources." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:351 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:453 #, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"Most official `Flower code examples `_ " +"are already updated to Flower 1.13 so they can serve as a reference for " +"using the Flower 1.13 API. If there are further questions, `join the " +"Flower Slack `_ (and use the channel " +"``#questions``) or post them on `Flower Discuss " +"`_ where you can find the community posting " +"and answering questions." msgstr "" "La plupart des `exemples de code Flower officiels " "`_ sont déjà mis à " @@ -8801,19 +8116,18 @@ msgstr "" "Flower `_ et utilise le canal " "``#questions``." -#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:460 #, fuzzy msgid "Important" msgstr "Changements importants :" -#: ../../source/how-to-upgrade-to-flower-next.rst:360 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:462 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"As we continuously enhance Flower at a rapid pace, we'll be periodically " +"updating this guide. Please feel free to share any feedback with us!" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:366 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:465 msgid "Happy migrating! 
🚀" msgstr "" @@ -9047,7 +8361,7 @@ msgid "" "side clipping:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:115 +#: ../../source/how-to-use-differential-privacy.rst:116 msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " @@ -9059,11 +8373,11 @@ msgstr "" msgid "local DP mod" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:126 msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:140 +#: ../../source/how-to-use-differential-privacy.rst:144 msgid "" "Please note that the order of mods, especially those that modify " "parameters, is important when using multiple modifiers. Typically, " @@ -9071,19 +8385,19 @@ msgid "" "parameters." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:145 +#: ../../source/how-to-use-differential-privacy.rst:149 msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:147 +#: ../../source/how-to-use-differential-privacy.rst:151 msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " "TensorFlow Privacy. For examples of using Flower with these engines, " "please refer to the Flower examples directory (`Opacus " "`_, `Tensorflow" -" Privacy `_)." +" Privacy `_)." 
msgstr "" #: ../../source/how-to-use-strategies.rst:2 @@ -9116,12 +8430,12 @@ msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" #: ../../source/how-to-use-strategies.rst:11 -#: ../../source/how-to-use-strategies.rst:43 +#: ../../source/how-to-use-strategies.rst:66 msgid "Customize an existing strategy with callback functions" msgstr "Personnalise une stratégie existante avec des fonctions de rappel" #: ../../source/how-to-use-strategies.rst:12 -#: ../../source/how-to-use-strategies.rst:99 +#: ../../source/how-to-use-strategies.rst:139 msgid "Implement a novel strategy" msgstr "Mets en place une nouvelle stratégie" @@ -9130,46 +8444,54 @@ msgid "Use an existing strategy" msgstr "Utilise une stratégie existante" #: ../../source/how-to-use-strategies.rst:17 +#, fuzzy msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Flower comes with a number of popular federated learning Strategies which" +" can be instantiated as follows:" msgstr "" "Flower intègre un certain nombre de stratégies d'apprentissage fédéré " "populaires. Une stratégie intégrée peut être instanciée comme suit :" -#: ../../source/how-to-use-strategies.rst:27 -#, fuzzy +#: ../../source/how-to-use-strategies.rst:45 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the ``start_server`` function. It is usually recommended" -" to adjust a few parameters during instantiation:" +"To make the ``ServerApp`` use this strategy, pass a ``server_fn`` " +"function to the ``ServerApp`` constructor. The ``server_fn`` function " +"should return a ``ServerAppComponents`` object that contains the strategy" +" instance and a ``ServerConfig`` instance." 
msgstr "" -"Cela crée une stratégie dont tous les paramètres sont laissés à leur " -"valeur par défaut et la transmet à la fonction :code:`start_server`. Il " -"est généralement recommandé d'ajuster quelques paramètres lors de " -"l'instanciation :" -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:50 +msgid "" +"Both ``Strategy`` and ``ServerConfig`` classes can be configured with " +"parameters. The ``Context`` object passed to ``server_fn`` contains the " +"values specified in the ``[tool.flwr.app.config]`` table in your " +"``pyproject.toml`` (a snippet is shown below). To access these values, " +"use ``context.run_config``." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:68 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " +"Existing strategies provide several ways to customize their behavior. " "Callback functions allow strategies to call user-provided code during " -"execution." +"execution. This approach enables you to modify the strategy's partial " +"behavior without rewriting the whole class from zero." msgstr "" "Les stratégies existantes offrent plusieurs façons de personnaliser leur " "comportement. Les fonctions de rappel permettent aux stratégies d'appeler" " le code fourni par l'utilisateur pendant l'exécution." -#: ../../source/how-to-use-strategies.rst:49 +#: ../../source/how-to-use-strategies.rst:73 msgid "Configuring client fit and client evaluate" msgstr "Configurer l'adaptation et l'évaluation du client" -#: ../../source/how-to-use-strategies.rst:51 +#: ../../source/how-to-use-strategies.rst:75 #, fuzzy msgid "" "The server can pass new configuration values to the client each round by " "providing a function to ``on_fit_config_fn``. The provided function will " "be called by the strategy and must return a dictionary of configuration " -"key values pairs that will be sent to the client. 
It must return a " +"key value pairs that will be sent to the client. It must return a " "dictionary of arbitrary configuration values ``client.fit`` and " "``client.evaluate`` functions during each round of federated learning." msgstr "" @@ -9182,14 +8504,17 @@ msgstr "" "et :code:`client.evaluate` au cours de chaque tour d'apprentissage " "fédéré." -#: ../../source/how-to-use-strategies.rst:84 +#: ../../source/how-to-use-strategies.rst:121 #, fuzzy msgid "" "The ``on_fit_config_fn`` can be used to pass arbitrary configuration " -"values from server to client, and potentially change these values each " +"values from server to client and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " "the dictionary returned by the ``on_fit_config_fn`` in its own " -"``client.fit()`` function." +"``client.fit()`` function. And while the values can be also passed " +"directly via the context this function can be a place to implement finer " +"control over the `fit` behaviour that may not be achieved by the context," +" which sets fixed values." msgstr "" "Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " "configuration arbitraires du serveur au client, et changer poétiquement " @@ -9197,7 +8522,7 @@ msgstr "" "d'apprentissage. Le client recevra le dictionnaire renvoyé par le " ":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." 
-#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:129 #, fuzzy msgid "" "Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " @@ -9207,11 +8532,11 @@ msgstr "" ":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" " à :code:`client.evaluate()`" -#: ../../source/how-to-use-strategies.rst:93 +#: ../../source/how-to-use-strategies.rst:133 msgid "Configuring server-side evaluation" msgstr "Configuration de l'évaluation côté serveur" -#: ../../source/how-to-use-strategies.rst:95 +#: ../../source/how-to-use-strategies.rst:135 #, fuzzy msgid "" "Server-side evaluation can be enabled by passing an evaluation function " @@ -9220,7 +8545,7 @@ msgstr "" "L'évaluation côté serveur peut être activée en passant une fonction " "d'évaluation à :code:`evaluate_fn`." -#: ../../source/how-to-use-strategies.rst:101 +#: ../../source/how-to-use-strategies.rst:141 #, fuzzy msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " @@ -9244,11 +8569,7 @@ msgstr "Quickstart tutorials" msgid "How-to guides" msgstr "Guides" -#: ../../source/index.rst:106 -msgid "Legacy example guides" -msgstr "" - -#: ../../source/index.rst:114 ../../source/index.rst:119 +#: ../../source/index.rst:107 ../../source/index.rst:112 msgid "Explanations" msgstr "Explications" @@ -9256,26 +8577,26 @@ msgstr "Explications" msgid "API reference" msgstr "Référence pour l'API" -#: ../../source/index.rst:145 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "Référence pour la documentation" -#: ../../source/index.rst:160 +#: ../../source/index.rst:153 #, fuzzy msgid "Contributor tutorials" msgstr "Configuration du contributeur" -#: ../../source/index.rst:167 +#: ../../source/index.rst:160 #, fuzzy msgid "Contributor how-to guides" msgstr "Guide pour les contributeurs" -#: ../../source/index.rst:179 +#: ../../source/index.rst:172 #, fuzzy msgid "Contributor explanations" msgstr "Explications" -#: 
../../source/index.rst:185 +#: ../../source/index.rst:178 #, fuzzy msgid "Contributor references" msgstr "Configuration du contributeur" @@ -9292,12 +8613,13 @@ msgid "Flower Framework Documentation" msgstr "Rédiger de la documentation" #: ../../source/index.rst:7 +#, fuzzy msgid "" "Welcome to Flower's documentation. `Flower `_ is a " "friendly federated learning framework." msgstr "" "Bienvenue sur la documentation de Flower. `Flower `_ " -"est un framework de federated learning convivial et facile à utiliser." +"est un framework de federated AI convivial et facile à utiliser." #: ../../source/index.rst:11 msgid "Join the Flower Community" @@ -9384,7 +8706,7 @@ msgstr "" "Guides orientés sur la résolutions étapes par étapes de problèmes ou " "objectifs specifiques." -#: ../../source/index.rst:116 +#: ../../source/index.rst:109 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." @@ -9392,29 +8714,29 @@ msgstr "" "Guides orientés sur la compréhension et l'explication des sujets et idées" " de fonds sur lesquels sont construits Flower et l'IA collaborative." -#: ../../source/index.rst:128 +#: ../../source/index.rst:121 #, fuzzy msgid "References" msgstr "Référence" -#: ../../source/index.rst:130 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "Référence de l'API orientée sur l'information pure." -#: ../../source/index.rst:139::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:139::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:155 +#: ../../source/index.rst:148 #, fuzzy msgid "Contributor docs" msgstr "Configuration du contributeur" -#: ../../source/index.rst:157 +#: ../../source/index.rst:150 #, fuzzy msgid "" "The Flower community welcomes contributions. 
The following docs are " @@ -9429,14 +8751,20 @@ msgstr "" msgid "Flower CLI reference" msgstr "Client de Flower" -#: ../../source/ref-api-cli.rst:7 +#: ../../source/ref-api-cli.rst:5 #, fuzzy -msgid "flwr CLI" +msgid "Basic Commands" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-api-cli.rst:10 +#, fuzzy +msgid "``flwr`` CLI" msgstr "Client de Flower" #: ../../flwr:1 +#, fuzzy msgid "flwr is the Flower command line interface." -msgstr "" +msgstr "Client de Flower" #: ../../source/ref-api-cli.rst #, fuzzy @@ -9521,7 +8849,7 @@ msgstr "" msgid "Arguments" msgstr "Amélioration de la documentation" -#: ../../flwr install:1 log:1 new:1 run:1 +#: ../../flwr install:1 log:1 ls:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "Améliorations facultatives" @@ -9538,7 +8866,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log run +#: ../../flwr log ls run #, fuzzy msgid "default" msgstr "Flux de travail" @@ -9565,10 +8893,35 @@ msgstr "" msgid "Name of the federation to run the app on" msgstr "" +#: ../../flwr ls:1 +msgid "List runs." +msgstr "" + +#: ../../flwr ls:1 +msgid "List all runs" +msgstr "" + +#: ../../flwr ls:1 run:1 +msgid "``False``" +msgstr "" + +#: ../../flwr ls:1 +msgid "Specific run ID to display" +msgstr "" + +#: ../../flwr ls:1 +#, fuzzy +msgid "Path of the Flower project" +msgstr "Chargement des données" + +#: ../../flwr ls:1 +msgid "Name of the federation" +msgstr "" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." -msgstr "Serveur de Flower" +msgstr "Créer une nouvelle page" #: ../../flwr new:1 msgid "The ML framework to use" @@ -9621,11 +8974,6 @@ msgid "" "default." msgstr "" -#: ../../flwr run:1 -#, fuzzy -msgid "``False``" -msgstr ":code:`évaluer`" - #: ../../flwr run:1 #, fuzzy msgid "Path of the Flower App to run." @@ -9635,39 +8983,68 @@ msgstr "Chargement des données" msgid "Name of the federation to run the app on." 
msgstr "" -#: ../../source/ref-api-cli.rst:16 +#: ../../source/ref-api-cli.rst:19 #, fuzzy -msgid "flower-simulation" -msgstr "Simulation de moniteur" - -#: ../../source/ref-api-cli.rst:26 -msgid "flower-superlink" +msgid "``flower-superlink``" msgstr "flower-superlink" -#: ../../source/ref-api-cli.rst:36 +#: ../../source/ref-api-cli.rst:29 #, fuzzy -msgid "flower-supernode" +msgid "``flower-supernode``" msgstr "Serveur de Flower" -#: ../../source/ref-api-cli.rst:46 +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "Advanced Commands" +msgstr "Options d'installation avancées" + +#: ../../source/ref-api-cli.rst:42 +#, fuzzy +msgid "``flwr-serverapp``" +msgstr "flower-driver-api" + +#: ../../source/ref-api-cli.rst:52 +#, fuzzy +msgid "``flwr-clientapp``" +msgstr "Flower ClientApp." + +#: ../../source/ref-api-cli.rst:60 +#, fuzzy +msgid "Technical Commands" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-api-cli.rst:65 +#, fuzzy +msgid "``flower-simulation``" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api-cli.rst:73 #, fuzzy -msgid "flower-server-app" +msgid "Deprecated Commands" +msgstr "Dépréciations" + +#: ../../source/ref-api-cli.rst:78 +#, fuzzy +msgid "``flower-server-app``" msgstr "flower-driver-api" -#: ../../source/ref-api-cli.rst:50 +#: ../../source/ref-api-cli.rst:82 msgid "" -"Note that since version ``1.11.0``, ``flower-server-app`` no longer " -"supports passing a reference to a `ServerApp` attribute. Instead, you " -"need to pass the path to Flower app via the argument ``--app``. This is " -"the path to a directory containing a `pyproject.toml`. You can create a " -"valid Flower app by executing ``flwr new`` and following the prompt." +"Note that from version ``1.13.0``, ``flower-server-app`` is deprecated. " +"Instead, you only need to execute |flwr_run_link|_ to start the run." 
msgstr "" -#: ../../source/ref-api-cli.rst:64 +#: ../../source/ref-api-cli.rst:88 #, fuzzy -msgid "flower-superexec" +msgid "``flower-superexec``" msgstr "flower-superlink" +#: ../../source/ref-api-cli.rst:92 +msgid "" +"Note that from version ``1.13.0``, ``flower-superexec`` is deprecated. " +"Instead, you only need to execute |flower_superlink_link|_." +msgstr "" + #: ../../source/ref-api/flwr.rst:2 #, fuzzy msgid "flwr" @@ -9760,6 +9137,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:24 #: ../../source/ref-api/flwr.server.strategy.rst:17 #: ../../source/ref-api/flwr.server.workflow.rst:17 +#: ../../source/ref-api/flwr.simulation.rst:26 msgid "Classes" msgstr "" @@ -9876,6 +9254,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:15 msgid "Methods" msgstr "" @@ -9979,7 +9358,7 @@ msgstr "" #: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.Driver.rst:43 #: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 @@ -10023,6 +9402,7 @@ msgstr "" #: flwr.server.driver.driver.Driver.pull_messages #: flwr.server.driver.driver.Driver.push_messages #: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.driver.driver.Driver.set_run #: flwr.server.serverapp_components.ServerAppComponents #: flwr.server.strategy.bulyan.Bulyan #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping @@ -10046,7 +9426,8 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters 
#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.run_simulation.run_simulation of +#: flwr.simulation.run_simulation.run_simulation +#: flwr.simulation.simulationio_connection.SimulationIoConnection of #, fuzzy msgid "Parameters" msgstr "Paramètres du modèle." @@ -10065,6 +9446,7 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -10098,6 +9480,7 @@ msgstr "" #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -10153,11 +9536,6 @@ msgstr "" msgid "The current client properties." 
msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -#, fuzzy -msgid "ClientApp" -msgstr "client" - #: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 #: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 #: flwr.common.context.Context:1 flwr.common.message.Error:1 @@ -10176,11 +9554,11 @@ msgstr "client" #: flwr.server.serverapp_components.ServerAppComponents:1 #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.app.start_client:51 flwr.client.app.start_numpy_client:36 #: flwr.client.client_app.ClientApp:4 #: flwr.client.client_app.ClientApp.evaluate:4 #: flwr.client.client_app.ClientApp.query:4 @@ -10189,7 +9567,7 @@ msgstr "" #: flwr.common.record.configsrecord.ConfigsRecord:20 #: flwr.common.record.metricsrecord.MetricsRecord:19 #: flwr.common.record.parametersrecord.ParametersRecord:22 -#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:46 #: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 @@ -10651,24 +10029,30 @@ msgstr "Flux de travail" msgid "start\\_client" msgstr "start_client" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: flwr.client.app.start_client:5 of +msgid "" +"This function is deprecated since 1.13.0. Use :code:`flower-supernode` " +"command instead to start a SuperNode." 
+msgstr "" + +#: flwr.client.app.start_client:8 flwr.client.app.start_numpy_client:9 of msgid "" "The IPv4 or IPv6 address of the server. If the Flower server runs on the " "same machine on port 8080, then `server_address` would be " "`\"[::]:8080\"`." msgstr "" -#: flwr.client.app.start_client:7 of +#: flwr.client.app.start_client:12 of msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: flwr.client.app.start_client:9 of +#: flwr.client.app.start_client:14 of msgid "" "An implementation of the abstract base class `flwr.client.Client` " "(default: None)" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: flwr.client.app.start_client:17 flwr.client.app.start_numpy_client:15 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " server. The default should be sufficient for most models. Users who " @@ -10678,49 +10062,57 @@ msgid "" "increased limit and block larger messages." msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: flwr.client.app.start_client:24 flwr.client.app.start_numpy_client:22 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " "to an SSL-enabled Flower server." msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: flwr.client.app.start_client:28 flwr.client.app.start_numpy_client:26 of msgid "" "Starts an insecure gRPC connection when True. Enables HTTPS connection " "when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: flwr.client.app.start_client:31 flwr.client.app.start_numpy_client:29 of msgid "" "Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " "bidirectional streaming - 'grpc-rere': gRPC, request-response " "(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.client.app.start_client:31 of +#: flwr.client.app.start_client:36 of +msgid "" +"Tuple containing the elliptic curve private key and public key for " +"authentication from the cryptography library. Source: " +"https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ Used " +"to establish an authenticated connection with the server." +msgstr "" + +#: flwr.client.app.start_client:41 of msgid "" "The maximum number of times the client will try to connect to the server " "before giving up in case of a connection error. If set to None, there is " "no limit to the number of tries." msgstr "" -#: flwr.client.app.start_client:35 of +#: flwr.client.app.start_client:45 of msgid "" "The maximum duration before the client stops trying to connect to the " "server in case of connection error. If set to None, there is no limit to " "the total time." msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:59 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:70 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" @@ -10906,10 +10298,11 @@ msgid "Configs record." 
msgstr "Configurer les clients" #: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy msgid "" -":py:obj:`Context `\\ \\(node\\_id\\, " -"node\\_config\\, state\\, run\\_config\\)" -msgstr "" +":py:obj:`Context `\\ \\(run\\_id\\, node\\_id\\, " +"node\\_config\\, state\\, ...\\)" +msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.common.rst:68::1 #: flwr.common.context.Context:1 of @@ -11448,19 +10841,23 @@ msgid "Context" msgstr "" #: flwr.common.context.Context:3 of -msgid "The ID that identifies the node." +msgid "The ID that identifies the run." msgstr "" #: flwr.common.context.Context:5 of +msgid "The ID that identifies the node." +msgstr "" + +#: flwr.common.context.Context:7 of msgid "" "A config (key/value mapping) unique to the node and independent of the " "`run_config`. This config persists across all runs this node participates" " in." msgstr "" -#: flwr.common.context.Context:8 of +#: flwr.common.context.Context:10 of msgid "" -"Holds records added by the entity in a given run and that will stay " +"Holds records added by the entity in a given `run_id` and that will stay " "local. This means that the data it holds will never leave the system it's" " running from. This can be used as an intermediate storage or scratchpad " "when executing mods. It can also be used as a memory to access at " @@ -11468,29 +10865,34 @@ msgid "" "multiple rounds)" msgstr "" -#: flwr.common.context.Context:15 of +#: flwr.common.context.Context:17 of msgid "" -"A config (key/value mapping) held by the entity in a given run and that " -"will stay local. It can be used at any point during the lifecycle of this" -" entity (e.g. across multiple rounds)" +"A config (key/value mapping) held by the entity in a given `run_id` and " +"that will stay local. It can be used at any point during the lifecycle of" +" this entity (e.g. 
across multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`node_id `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`node_config `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`state `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`run_config `\\" msgstr "serveur.stratégie.Stratégie" @@ -12084,20 +11486,6 @@ msgid "" "`\\" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" -msgstr "serveur.stratégie.Stratégie" - #: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" @@ -12675,6 +12063,10 @@ msgstr "" msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" +#: flwr.common.message.Message.create_error_reply:12 of +msgid "**message** -- A Message containing only the relevant error and metadata." +msgstr "" + #: flwr.common.message.Message.create_reply:3 of msgid "" "The method generates a new `Message` as a reply to this message. 
It " @@ -12718,6 +12110,10 @@ msgstr "" msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Métadonnées" + #: flwr.common.Metadata.created_at:1::1 #: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of msgid "An identifier for the current run." @@ -13293,7 +12689,7 @@ msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.server.rst:37::1 #: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +msgid "Abstract base Driver class for the ServerAppIo API." msgstr "" #: ../../source/ref-api/flwr.server.rst:37::1 @@ -13474,6 +12870,10 @@ msgstr "" msgid "**num_available** -- The number of currently available clients." msgstr "" +#: flwr.server.client_manager.ClientManager.register:3 of +msgid "The ClientProxy of the Client to register." +msgstr "" + #: flwr.server.client_manager.ClientManager.register:6 #: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" @@ -13487,12 +12887,16 @@ msgstr "" msgid "This method is idempotent." msgstr "" +#: flwr.server.client_manager.ClientManager.unregister:5 of +msgid "The ClientProxy of the Client to unregister." +msgstr "" + #: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy msgid "Driver" msgstr "serveur" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid "" ":py:obj:`create_message `\\ " @@ -13502,43 +12906,43 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.create_message:1 of msgid "Create a new message with specified parameters." 
msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.get_node_ids:1 of msgid "Get node IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`pull_messages `\\ " "\\(message\\_ids\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.pull_messages:1 of msgid "Pull messages based on message IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`push_messages `\\ " "\\(messages\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.push_messages:1 of msgid "Push messages to specified node IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid "" ":py:obj:`send_and_receive `\\ " @@ -13548,11 +12952,21 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.send_and_receive:1 of msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#, fuzzy +msgid ":py:obj:`set_run `\\ \\(run\\_id\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#: flwr.server.driver.driver.Driver.set_run:1 of +msgid "Request a run to the SuperLink with a given `run_id`." 
+msgstr "" + #: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy msgid ":py:obj:`run `\\" @@ -13664,6 +13078,17 @@ msgid "" "which is not affected by `timeout`." msgstr "" +#: flwr.server.driver.driver.Driver.set_run:3 of +msgid "" +"If a Run with the specified `run_id` exists, a local Run object will be " +"created. It enables further functionality in the driver, such as sending " +"`Messages`." +msgstr "" + +#: flwr.server.driver.driver.Driver.set_run:7 of +msgid "The `run_id` of the Run this Driver object operates in." +msgstr "" + #: ../../source/ref-api/flwr.server.History.rst:2 msgid "History" msgstr "" @@ -13738,41 +13163,46 @@ msgstr "" msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`config `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`strategy `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`client_manager `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`history `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`node_id `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`node_config `\\" 
msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`state `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`run_config `\\" msgstr "serveur.stratégie.Stratégie" @@ -13853,11 +13283,6 @@ msgstr "" msgid "Replace server strategy." msgstr "stratégie.du.serveur" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -#, fuzzy -msgid "ServerApp" -msgstr "serveur" - #: flwr.server.server_app.ServerApp:5 of #, fuzzy msgid "Use the `ServerApp` with an existing `Strategy`:" @@ -13888,7 +13313,7 @@ msgid "" "thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.app.start_server:9 +#: flwr.server.app.start_server:14 #: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " @@ -14018,31 +13443,37 @@ msgstr "" msgid "start\\_server" msgstr "serveur.start_server" -#: flwr.server.app.start_server:3 of +#: flwr.server.app.start_server:5 of +msgid "" +"This function is deprecated since 1.13.0. Use the :code:`flower-" +"superlink` command instead to start a SuperLink." +msgstr "" + +#: flwr.server.app.start_server:8 of msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.server.app.start_server:10 of msgid "" "A server implementation, either `flwr.server.Server` or a subclass " "thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.server.app.start_server:17 of msgid "" "An implementation of the abstract base class " "`flwr.server.strategy.Strategy`. If no strategy is provided, then " "`start_server` will use `flwr.server.strategy.FedAvg`." 
msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.server.app.start_server:21 of msgid "" "An implementation of the abstract base class `flwr.server.ClientManager`." " If no implementation is provided, then `start_server` will use " "`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.server.app.start_server:26 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " clients. The default should be sufficient for most models. Users who " @@ -14052,7 +13483,7 @@ msgid "" "increased limit and block larger messages." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " @@ -14060,38 +13491,38 @@ msgid "" "server certificate. * server private key." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " "bytes elements in the following order:" msgstr "" -#: flwr.server.app.start_server:32 of +#: flwr.server.app.start_server:37 of #, fuzzy msgid "CA certificate." msgstr "Certificats" -#: flwr.server.app.start_server:33 of +#: flwr.server.app.start_server:38 of #, fuzzy msgid "server certificate." msgstr "Certificats" -#: flwr.server.app.start_server:34 of +#: flwr.server.app.start_server:39 of #, fuzzy msgid "server private key." msgstr "stratégie.du.serveur" -#: flwr.server.app.start_server:37 of +#: flwr.server.app.start_server:42 of msgid "**hist** -- Object containing training and evaluation metrics." 
msgstr "" -#: flwr.server.app.start_server:42 of +#: flwr.server.app.start_server:47 of #, fuzzy msgid "Starting an insecure server:" msgstr "Démarrer le serveur" -#: flwr.server.app.start_server:46 of +#: flwr.server.app.start_server:51 of #, fuzzy msgid "Starting an SSL-enabled server:" msgstr "Démarrer le serveur" @@ -15403,7 +14834,7 @@ msgid "" msgstr "" #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:1231 +#: ../../source/ref-changelog.md:1434 msgid "FedAdagrad" msgstr "FedAdagrad" @@ -17081,29 +16512,71 @@ msgstr "" msgid "simulation" msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 msgid "" ":py:obj:`run_simulation `\\ " "\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.run_simulation.run_simulation:1 of msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#, fuzzy +msgid "" +":py:obj:`run_simulation_process " +"`\\ \\(...\\[\\, flwr\\_dir\\_\\," +" ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#: flwr.simulation.app.run_simulation_process:1 of +#, fuzzy +msgid "Run Flower Simulation process." +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 #, fuzzy msgid "" ":py:obj:`start_simulation `\\ " "\\(\\*args\\, \\*\\*kwargs\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.start_simulation:1 of msgid "Log error stating that module `ray` could not be imported." 
msgstr "" +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#, fuzzy +msgid "" +":py:obj:`SimulationIoConnection " +"`\\ \\(\\[...\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of +msgid "`SimulationIoConnection` provides an interface to the SimulationIo API." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:2 +#, fuzzy +msgid "SimulationIoConnection" +msgstr "Simulation de moniteur" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:3 of +msgid "The address (URL, IPv6, IPv4) of the SuperLink SimulationIo API service." +msgstr "" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:5 of +msgid "" +"The PEM-encoded root certificates as a byte string. If provided, a secure" +" connection using the certificates will be established to an SSL-enabled " +"Flower server." +msgstr "" + #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 #, fuzzy msgid "run\\_simulation" @@ -17159,6 +16632,11 @@ msgid "" "If enabled, DEBUG-level logs will be displayed." 
msgstr "" +#: ../../source/ref-api/flwr.simulation.run_simulation_process.rst:2 +#, fuzzy +msgid "run\\_simulation\\_process" +msgstr "Simulation de moniteur" + #: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 #, fuzzy msgid "start\\_simulation" @@ -17170,25 +16648,27 @@ msgstr "Changelog" #: ../../source/ref-changelog.md:3 #, fuzzy -msgid "v1.11.1 (2024-09-11)" -msgstr "v1.3.0 (2023-02-06)" +msgid "v1.13.1 (2024-11-26)" +msgstr "v1.4.0 (2023-04-21)" #: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 -#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 -#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 -#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 -#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 -#: ../../source/ref-changelog.md:940 +#: ../../source/ref-changelog.md:138 ../../source/ref-changelog.md:208 +#: ../../source/ref-changelog.md:240 ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:442 ../../source/ref-changelog.md:542 +#: ../../source/ref-changelog.md:606 ../../source/ref-changelog.md:699 +#: ../../source/ref-changelog.md:799 ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:1074 ../../source/ref-changelog.md:1143 msgid "Thanks to our contributors" msgstr "Merci à nos contributeurs" #: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 -#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 -#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 -#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:140 ../../source/ref-changelog.md:210 +#: 
../../source/ref-changelog.md:242 ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:444 ../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:608 ../../source/ref-changelog.md:701 +#: ../../source/ref-changelog.md:801 ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:949 ../../source/ref-changelog.md:1007 msgid "" "We would like to give our special thanks to all the contributors who made" " the new version of Flower possible (in `git shortlog` order):" @@ -17199,260 +16679,270 @@ msgstr "" #: ../../source/ref-changelog.md:9 msgid "" -"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " -"`Javier`, `Robert Steiner`, `Yan Gao` " -msgstr "" - -#: ../../source/ref-changelog.md:11 -#, fuzzy -msgid "Improvements" -msgstr "Améliorations facultatives" +"`Adam Narozniak`, `Charles Beauville`, `Heng Pan`, `Javier`, `Robert " +"Steiner` " +msgstr "" + +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:144 ../../source/ref-changelog.md:246 +#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:448 +#: ../../source/ref-changelog.md:548 ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:705 ../../source/ref-changelog.md:805 +#: ../../source/ref-changelog.md:889 ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:1011 ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1209 ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1318 ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1429 ../../source/ref-changelog.md:1468 +#: ../../source/ref-changelog.md:1501 ../../source/ref-changelog.md:1551 +msgid "What's new?" +msgstr "Quoi de neuf ?" 
#: ../../source/ref-changelog.md:13 #, fuzzy msgid "" -"**Implement** `keys/values/items` **methods for** `TypedDict` " -"([#4146](https://github.com/adap/flower/pull/4146))" +"**Fix `SimulationEngine` Executor for SuperLink** " +"([#4563](https://github.com/adap/flower/pull/4563), " +"[#4568](https://github.com/adap/flower/pull/4568), " +"[#4570](https://github.com/adap/flower/pull/4570))" msgstr "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" #: ../../source/ref-changelog.md:15 -#, fuzzy msgid "" -"**Fix parsing of** `--executor-config` **if present** " -"([#4125](https://github.com/adap/flower/pull/4125))" +"Resolved an issue that prevented SuperLink from functioning correctly " +"when using the `SimulationEngine` executor." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:17 #, fuzzy msgid "" -"**Adjust framework name in templates docstrings** " -"([#4127](https://github.com/adap/flower/pull/4127))" +"**Improve FAB build and install** " +"([#4571](https://github.com/adap/flower/pull/4571))" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Nouvelle stratégie de FedMedian** " +"([#1461](https://github.com/adap/flower/pull/1461))" #: ../../source/ref-changelog.md:19 -#, fuzzy msgid "" -"**Update** `flwr new` **Hugging Face template** " -"([#4169](https://github.com/adap/flower/pull/4169))" +"An updated FAB build and install process produces smaller FAB files and " +"doesn't rely on `pip install` any more. It also resolves an issue where " +"all files were unnecessarily included in the FAB file. 
The `flwr` CLI " +"commands now correctly pack only the necessary files, such as `.md`, " +"`.toml` and `.py`, ensuring more efficient and accurate packaging." msgstr "" -"**Nouvel exemple de code pour les Transformers à visage embrassant** " -"([#863](https://github.com/adap/flower/pull/863))" #: ../../source/ref-changelog.md:21 #, fuzzy msgid "" -"**Fix** `flwr new` **FlowerTune template** " -"([#4123](https://github.com/adap/flower/pull/4123))" +"**Update** `embedded-devices` **example** " +"([#4381](https://github.com/adap/flower/pull/4381))" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:23 -#, fuzzy -msgid "" -"**Add buffer time after** `ServerApp` **thread initialization** " -"([#4119](https://github.com/adap/flower/pull/4119))" +msgid "The example now uses the `flwr run` command and the Deployment Engine." 
msgstr "" -"**Ajouter des mesures de formation à** `History` **objet pendant les " -"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" #: ../../source/ref-changelog.md:25 #, fuzzy msgid "" -"**Handle unsuitable resources for simulation** " -"([#4143](https://github.com/adap/flower/pull/4143))" +"**Update Documentation** " +"([#4566](https://github.com/adap/flower/pull/4566), " +"[#4569](https://github.com/adap/flower/pull/4569), " +"[#4560](https://github.com/adap/flower/pull/4560), " +"[#4556](https://github.com/adap/flower/pull/4556), " +"[#4581](https://github.com/adap/flower/pull/4581), " +"[#4537](https://github.com/adap/flower/pull/4537), " +"[#4562](https://github.com/adap/flower/pull/4562), " +"[#4582](https://github.com/adap/flower/pull/4582))" msgstr "" -"**Ajouter un nouveau guide pratique pour le suivi des simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" #: ../../source/ref-changelog.md:27 -#, fuzzy msgid "" -"**Update example READMEs** " -"([#4117](https://github.com/adap/flower/pull/4117))" +"Enhanced documentation across various aspects, including updates to " +"translation workflows, Docker-related READMEs, and recommended datasets. " +"Improvements also include formatting fixes for dataset partitioning docs " +"and better references to resources in the datasets documentation index." 
msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" #: ../../source/ref-changelog.md:29 #, fuzzy msgid "" -"**Update SuperNode authentication docs** " -"([#4160](https://github.com/adap/flower/pull/4160))" +"**Update Infrastructure and CI/CD** " +"([#4577](https://github.com/adap/flower/pull/4577), " +"[#4578](https://github.com/adap/flower/pull/4578), " +"[#4558](https://github.com/adap/flower/pull/4558), " +"[#4551](https://github.com/adap/flower/pull/4551), " +"[#3356](https://github.com/adap/flower/pull/3356), " +"[#4559](https://github.com/adap/flower/pull/4559), " +"[#4575](https://github.com/adap/flower/pull/4575))" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 -#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 -#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 -#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 -#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 -#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 -#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 -#: ../../source/ref-changelog.md:1253 -msgid "Incompatible changes" -msgstr "Changements incompatibles" +#: ../../source/ref-changelog.md:31 +#, fuzzy +msgid "" +"**General improvements** " 
+"([#4557](https://github.com/adap/flower/pull/4557), " +"[#4564](https://github.com/adap/flower/pull/4564), " +"[#4573](https://github.com/adap/flower/pull/4573), " +"[#4561](https://github.com/adap/flower/pull/4561), " +"[#4579](https://github.com/adap/flower/pull/4579), " +"[#4572](https://github.com/adap/flower/pull/4572))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" + +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:198 ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:408 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" #: ../../source/ref-changelog.md:35 #, fuzzy -msgid "v1.11.0 (2024-08-30)" -msgstr "v1.3.0 (2023-02-06)" +msgid "v1.13.0 (2024-11-20)" +msgstr "v1.4.0 (2023-04-21)" #: ../../source/ref-changelog.md:41 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," -" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " -"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +"Beutel`, `Daniel Nata Nugraha`, `Dimitris Stripelis`, `Heng Pan`, " +"`Javier`, `Mohammad Naseri`, `Robert Steiner`, `Waris Gill`, `William " +"Lindskog`, `Yan Gao`, `Yao Xu`, `wwjang` " msgstr "" -#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 -#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 -#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 -#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 -#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 -#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 -#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 -#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 -#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 -#: ../../source/ref-changelog.md:1348 -msgid "What's new?" -msgstr "Quoi de neuf ?" 
- #: ../../source/ref-changelog.md:45 +#, fuzzy msgid "" -"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " -"([#4006](https://github.com/adap/flower/pull/4006), " -"[#3945](https://github.com/adap/flower/pull/3945), " -"[#3999](https://github.com/adap/flower/pull/3999), " -"[#4027](https://github.com/adap/flower/pull/4027), " -"[#3851](https://github.com/adap/flower/pull/3851), " -"[#3946](https://github.com/adap/flower/pull/3946), " -"[#4003](https://github.com/adap/flower/pull/4003), " -"[#4029](https://github.com/adap/flower/pull/4029), " -"[#3942](https://github.com/adap/flower/pull/3942), " -"[#3957](https://github.com/adap/flower/pull/3957), " -"[#4020](https://github.com/adap/flower/pull/4020), " -"[#4044](https://github.com/adap/flower/pull/4044), " -"[#3852](https://github.com/adap/flower/pull/3852), " -"[#4019](https://github.com/adap/flower/pull/4019), " -"[#4031](https://github.com/adap/flower/pull/4031), " -"[#4036](https://github.com/adap/flower/pull/4036), " -"[#4049](https://github.com/adap/flower/pull/4049), " -"[#4017](https://github.com/adap/flower/pull/4017), " -"[#3943](https://github.com/adap/flower/pull/3943), " -"[#3944](https://github.com/adap/flower/pull/3944), " -"[#4011](https://github.com/adap/flower/pull/4011), " -"[#3619](https://github.com/adap/flower/pull/3619))" +"**Introduce `flwr ls` command** " +"([#4460](https://github.com/adap/flower/pull/4460), " +"[#4459](https://github.com/adap/flower/pull/4459), " +"[#4477](https://github.com/adap/flower/pull/4477))" msgstr "" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" #: ../../source/ref-changelog.md:47 msgid "" -"Dynamic code updates are here! `flwr run` can now ship and install the " -"latest version of your `ServerApp` and `ClientApp` to an already-running " -"federation (SuperLink and SuperNodes)." 
+"The `flwr ls` command is now available to display details about all runs " +"(or one specific run). It supports the following usage options:" msgstr "" #: ../../source/ref-changelog.md:49 -msgid "" -"How does it work? `flwr run` bundles your Flower app into a single FAB " -"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," -" to both the SuperLink and those SuperNodes that need it. This allows you" -" to keep SuperExec, SuperLink and SuperNodes running as permanent " -"infrastructure, and then ship code updates (including completely new " -"projects!) dynamically." -msgstr "" - -#: ../../source/ref-changelog.md:51 -msgid "`flwr run` is all you need." +msgid "`flwr ls --runs [] []`: Lists all runs." msgstr "" -#: ../../source/ref-changelog.md:53 -#, fuzzy +#: ../../source/ref-changelog.md:50 msgid "" -"**Introduce isolated** `ClientApp` **execution** " -"([#3970](https://github.com/adap/flower/pull/3970), " -"[#3976](https://github.com/adap/flower/pull/3976), " -"[#4002](https://github.com/adap/flower/pull/4002), " -"[#4001](https://github.com/adap/flower/pull/4001), " -"[#4034](https://github.com/adap/flower/pull/4034), " -"[#4037](https://github.com/adap/flower/pull/4037), " -"[#3977](https://github.com/adap/flower/pull/3977), " -"[#4042](https://github.com/adap/flower/pull/4042), " -"[#3978](https://github.com/adap/flower/pull/3978), " -"[#4039](https://github.com/adap/flower/pull/4039), " -"[#4033](https://github.com/adap/flower/pull/4033), " -"[#3971](https://github.com/adap/flower/pull/3971), " -"[#4035](https://github.com/adap/flower/pull/4035), " -"[#3973](https://github.com/adap/flower/pull/3973), " -"[#4032](https://github.com/adap/flower/pull/4032))" +"`flwr ls --run-id [] []`: Displays details for " +"a specific run." 
msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-changelog.md:52 msgid "" -"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" -" enterprise deployment, this allows you to set strict limits on what the " -"`ClientApp` can and cannot do." +"This command provides information including the run ID, FAB ID and " +"version, run status, elapsed time, and timestamps for when the run was " +"created, started running, and finished." 
msgstr "" -#: ../../source/ref-changelog.md:57 -msgid "`flower-supernode` supports three `--isolation` modes:" +#: ../../source/ref-changelog.md:54 +#, fuzzy +msgid "" +"**Fuse SuperLink and SuperExec** " +"([#4358](https://github.com/adap/flower/pull/4358), " +"[#4403](https://github.com/adap/flower/pull/4403), " +"[#4406](https://github.com/adap/flower/pull/4406), " +"[#4357](https://github.com/adap/flower/pull/4357), " +"[#4359](https://github.com/adap/flower/pull/4359), " +"[#4354](https://github.com/adap/flower/pull/4354), " +"[#4229](https://github.com/adap/flower/pull/4229), " +"[#4283](https://github.com/adap/flower/pull/4283), " +"[#4352](https://github.com/adap/flower/pull/4352))" msgstr "" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:56 msgid "" -"Unset: The SuperNode runs the `ClientApp` in the same process (as in " -"previous versions of Flower). This is the default mode." +"SuperExec has been integrated into SuperLink, enabling SuperLink to " +"directly manage ServerApp processes (`flwr-serverapp`). The `flwr` CLI " +"now targets SuperLink's Exec API. Additionally, SuperLink introduces two " +"isolation modes for running ServerApps: `subprocess` (default) and " +"`process`, which can be specified using the `--isolation " +"{subprocess,process}` flag." msgstr "" -#: ../../source/ref-changelog.md:60 +#: ../../source/ref-changelog.md:58 +#, fuzzy msgid "" -"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " -"`ClientApp`." 
+"**Introduce `flwr-serverapp` command** " +"([#4394](https://github.com/adap/flower/pull/4394), " +"[#4370](https://github.com/adap/flower/pull/4370), " +"[#4367](https://github.com/adap/flower/pull/4367), " +"[#4350](https://github.com/adap/flower/pull/4350), " +"[#4364](https://github.com/adap/flower/pull/4364), " +"[#4400](https://github.com/adap/flower/pull/4400), " +"[#4363](https://github.com/adap/flower/pull/4363), " +"[#4401](https://github.com/adap/flower/pull/4401), " +"[#4388](https://github.com/adap/flower/pull/4388), " +"[#4402](https://github.com/adap/flower/pull/4402))" msgstr "" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:60 msgid "" -"`--isolation=process`: The SuperNode expects an externally-managed " -"process to run the `ClientApp`. This external process is not managed by " -"the SuperNode, so it has to be started beforehand and terminated " -"manually. The common way to use this isolation mode is via the new " -"`flwr/clientapp` Docker image." +"The `flwr-serverapp` command has been introduced as a CLI entry point " +"that runs a `ServerApp` process. This process communicates with SuperLink" +" to load and execute the `ServerApp` object, enabling isolated execution " +"and more flexible deployment." 
msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:62 #, fuzzy msgid "" -"**Improve Docker support for enterprise deployments** " -"([#4050](https://github.com/adap/flower/pull/4050), " -"[#4090](https://github.com/adap/flower/pull/4090), " -"[#3784](https://github.com/adap/flower/pull/3784), " -"[#3998](https://github.com/adap/flower/pull/3998), " -"[#4094](https://github.com/adap/flower/pull/4094), " -"[#3722](https://github.com/adap/flower/pull/3722))" +"**Improve simulation engine and introduce `flwr-simulation` command** " +"([#4433](https://github.com/adap/flower/pull/4433), " +"[#4486](https://github.com/adap/flower/pull/4486), " +"[#4448](https://github.com/adap/flower/pull/4448), " +"[#4427](https://github.com/adap/flower/pull/4427), " +"[#4438](https://github.com/adap/flower/pull/4438), " +"[#4421](https://github.com/adap/flower/pull/4421), " +"[#4430](https://github.com/adap/flower/pull/4430), " +"[#4462](https://github.com/adap/flower/pull/4462))" msgstr "" "**Nouvel exemple de code MLCube** " "([#779](https://github.com/adap/flower/pull/779), " @@ -17460,70 +16950,158 @@ msgstr "" "[#1065](https://github.com/adap/flower/pull/1065), " "[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:64 msgid "" -"Flower 1.11 ships many Docker improvements that are especially useful for" -" enterprise deployments:" -msgstr "" - -#: ../../source/ref-changelog.md:67 -msgid "`flwr/supernode` comes with a new Alpine Docker image." +"The simulation engine has been significantly improved, resulting in " +"dramatically faster simulations. Additionally, the `flwr-simulation` " +"command has been introduced to enhance maintainability and provide a " +"dedicated entry point for running simulations." 
msgstr "" -#: ../../source/ref-changelog.md:68 +#: ../../source/ref-changelog.md:66 +#, fuzzy msgid "" -"`flwr/clientapp` is a new image to be used with the `--isolation=process`" -" option. In this mode, SuperNode and `ClientApp` run in two different " -"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " -"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " -"runs the `ClientApp`. This is the recommended way to deploy Flower in " -"enterprise settings." +"**Improve SuperLink message management** " +"([#4378](https://github.com/adap/flower/pull/4378), " +"[#4369](https://github.com/adap/flower/pull/4369))" msgstr "" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:68 msgid "" -"New all-in-one Docker Compose enables you to easily start a full Flower " -"Deployment Engine on a single machine." +"SuperLink now validates the destination node ID of instruction messages " +"and checks the TTL (time-to-live) for reply messages. When pulling reply " +"messages, an error reply will be generated and returned if the " +"corresponding instruction message does not exist, has expired, or if the " +"reply message exists but has expired." 
msgstr "" #: ../../source/ref-changelog.md:70 +#, fuzzy msgid "" -"Completely new Docker documentation: " -"https://flower.ai/docs/framework/docker/index.html" +"**Introduce FedDebug baseline** " +"([#3783](https://github.com/adap/flower/pull/3783))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:72 -#, fuzzy msgid "" -"**Improve SuperNode authentication** " -"([#4043](https://github.com/adap/flower/pull/4043), " -"[#4047](https://github.com/adap/flower/pull/4047), " -"[#4074](https://github.com/adap/flower/pull/4074))" +"FedDebug is a framework that enhances debugging in Federated Learning by " +"enabling interactive inspection of the training process and automatically" +" identifying clients responsible for degrading the global model's " +"performance—all without requiring testing data or labels. Learn more in " +"the [FedDebug baseline " +"documentation](https://flower.ai/docs/baselines/feddebug.html)." msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" #: ../../source/ref-changelog.md:74 msgid "" -"SuperNode auth has been improved in several ways, including improved " -"logging, improved testing, and improved error handling." 
+"**Update documentation** " +"([#4511](https://github.com/adap/flower/pull/4511), " +"[#4010](https://github.com/adap/flower/pull/4010), " +"[#4396](https://github.com/adap/flower/pull/4396), " +"[#4499](https://github.com/adap/flower/pull/4499), " +"[#4269](https://github.com/adap/flower/pull/4269), " +"[#3340](https://github.com/adap/flower/pull/3340), " +"[#4482](https://github.com/adap/flower/pull/4482), " +"[#4387](https://github.com/adap/flower/pull/4387), " +"[#4342](https://github.com/adap/flower/pull/4342), " +"[#4492](https://github.com/adap/flower/pull/4492), " +"[#4474](https://github.com/adap/flower/pull/4474), " +"[#4500](https://github.com/adap/flower/pull/4500), " +"[#4514](https://github.com/adap/flower/pull/4514), " +"[#4236](https://github.com/adap/flower/pull/4236), " +"[#4112](https://github.com/adap/flower/pull/4112), " +"[#3367](https://github.com/adap/flower/pull/3367), " +"[#4501](https://github.com/adap/flower/pull/4501), " +"[#4373](https://github.com/adap/flower/pull/4373), " +"[#4409](https://github.com/adap/flower/pull/4409), " +"[#4356](https://github.com/adap/flower/pull/4356), " +"[#4520](https://github.com/adap/flower/pull/4520), " +"[#4524](https://github.com/adap/flower/pull/4524), " +"[#4525](https://github.com/adap/flower/pull/4525), " +"[#4526](https://github.com/adap/flower/pull/4526), " +"[#4527](https://github.com/adap/flower/pull/4527), " +"[#4528](https://github.com/adap/flower/pull/4528), " +"[#4545](https://github.com/adap/flower/pull/4545), " +"[#4522](https://github.com/adap/flower/pull/4522), " +"[#4534](https://github.com/adap/flower/pull/4534), " +"[#4513](https://github.com/adap/flower/pull/4513), " +"[#4529](https://github.com/adap/flower/pull/4529), " +"[#4441](https://github.com/adap/flower/pull/4441), " +"[#4530](https://github.com/adap/flower/pull/4530), " +"[#4470](https://github.com/adap/flower/pull/4470), " +"[#4553](https://github.com/adap/flower/pull/4553), " 
+"[#4531](https://github.com/adap/flower/pull/4531), " +"[#4554](https://github.com/adap/flower/pull/4554), " +"[#4555](https://github.com/adap/flower/pull/4555), " +"[#4552](https://github.com/adap/flower/pull/4552), " +"[#4533](https://github.com/adap/flower/pull/4533))" msgstr "" #: ../../source/ref-changelog.md:76 +msgid "" +"Many documentation pages and tutorials have been updated to improve " +"clarity, fix typos, incorporate user feedback, and stay aligned with the " +"latest features in the framework. Key updates include adding a guide for " +"designing stateful `ClientApp` objects, updating the comprehensive guide " +"for setting up and running Flower's `Simulation Engine`, updating the " +"XGBoost, scikit-learn, and JAX quickstart tutorials to use `flwr run`, " +"updating DP guide, removing outdated pages, updating Docker docs, and " +"marking legacy functions as deprecated. The [Secure Aggregation " +"Protocols](https://flower.ai/docs/framework/contributor-ref-secure-" +"aggregation-protocols.html) page has also been updated." 
+msgstr "" + +#: ../../source/ref-changelog.md:78 #, fuzzy msgid "" -"**Update** `flwr new` **templates** " -"([#3933](https://github.com/adap/flower/pull/3933), " -"[#3894](https://github.com/adap/flower/pull/3894), " -"[#3930](https://github.com/adap/flower/pull/3930), " -"[#3931](https://github.com/adap/flower/pull/3931), " -"[#3997](https://github.com/adap/flower/pull/3997), " -"[#3979](https://github.com/adap/flower/pull/3979), " -"[#3965](https://github.com/adap/flower/pull/3965), " -"[#4013](https://github.com/adap/flower/pull/4013), " -"[#4064](https://github.com/adap/flower/pull/4064))" +"**Update examples and templates** " +"([#4510](https://github.com/adap/flower/pull/4510), " +"[#4368](https://github.com/adap/flower/pull/4368), " +"[#4121](https://github.com/adap/flower/pull/4121), " +"[#4329](https://github.com/adap/flower/pull/4329), " +"[#4382](https://github.com/adap/flower/pull/4382), " +"[#4248](https://github.com/adap/flower/pull/4248), " +"[#4395](https://github.com/adap/flower/pull/4395), " +"[#4386](https://github.com/adap/flower/pull/4386), " +"[#4408](https://github.com/adap/flower/pull/4408))" +msgstr "" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" + +#: ../../source/ref-changelog.md:80 +msgid "" +"Multiple examples and templates have been updated to enhance usability " +"and correctness. The updates include the `30-minute-tutorial`, " +"`quickstart-jax`, `quickstart-pytorch`, `advanced-tensorflow` examples, " +"and the FlowerTune template." 
+msgstr "" + +#: ../../source/ref-changelog.md:82 +#, fuzzy +msgid "" +"**Improve Docker support** " +"([#4506](https://github.com/adap/flower/pull/4506), " +"[#4424](https://github.com/adap/flower/pull/4424), " +"[#4224](https://github.com/adap/flower/pull/4224), " +"[#4413](https://github.com/adap/flower/pull/4413), " +"[#4414](https://github.com/adap/flower/pull/4414), " +"[#4336](https://github.com/adap/flower/pull/4336), " +"[#4420](https://github.com/adap/flower/pull/4420), " +"[#4407](https://github.com/adap/flower/pull/4407), " +"[#4422](https://github.com/adap/flower/pull/4422), " +"[#4532](https://github.com/adap/flower/pull/4532), " +"[#4540](https://github.com/adap/flower/pull/4540))" msgstr "" "**Mise à jour de la documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -17534,294 +17112,345 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:78 +#: ../../source/ref-changelog.md:84 msgid "" -"All `flwr new` templates have been updated to show the latest recommended" -" use of Flower APIs." +"Docker images and configurations have been updated, including updating " +"Docker Compose files to version 1.13.0, refactoring the Docker build " +"matrix for better maintainability, updating `docker/build-push-action` to" +" 6.9.0, and improving Docker documentation." 
msgstr "" -#: ../../source/ref-changelog.md:80 +#: ../../source/ref-changelog.md:86 #, fuzzy msgid "" -"**Improve Simulation Engine** " -"([#4095](https://github.com/adap/flower/pull/4095), " -"[#3913](https://github.com/adap/flower/pull/3913), " -"[#4059](https://github.com/adap/flower/pull/4059), " -"[#3954](https://github.com/adap/flower/pull/3954), " -"[#4071](https://github.com/adap/flower/pull/4071), " -"[#3985](https://github.com/adap/flower/pull/3985), " -"[#3988](https://github.com/adap/flower/pull/3988))" +"**Allow app installation without internet access** " +"([#4479](https://github.com/adap/flower/pull/4479), " +"[#4475](https://github.com/adap/flower/pull/4475))" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:82 +#: ../../source/ref-changelog.md:88 msgid "" -"The Flower Simulation Engine comes with several updates, including " -"improved run config support, verbose logging, simulation backend " -"configuration via `flwr run`, and more." +"The `flwr build` command now includes a wheel file in the FAB, enabling " +"Flower app installation in environments without internet access via `flwr" +" install`." 
msgstr "" -#: ../../source/ref-changelog.md:84 +#: ../../source/ref-changelog.md:90 #, fuzzy msgid "" -"**Improve** `RecordSet` " -"([#4052](https://github.com/adap/flower/pull/4052), " -"[#3218](https://github.com/adap/flower/pull/3218), " -"[#4016](https://github.com/adap/flower/pull/4016))" +"**Improve `flwr log` command** " +"([#4391](https://github.com/adap/flower/pull/4391), " +"[#4411](https://github.com/adap/flower/pull/4411), " +"[#4390](https://github.com/adap/flower/pull/4390), " +"[#4397](https://github.com/adap/flower/pull/4397))" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:86 +#: ../../source/ref-changelog.md:92 +#, fuzzy msgid "" -"`RecordSet` is the core object to exchange model parameters, " -"configuration values and metrics between `ClientApp` and `ServerApp`. " -"This release ships several smaller improvements to `RecordSet` and " -"related `*Record` types." 
+"**Refactor SuperNode for better maintainability and efficiency** " +"([#4439](https://github.com/adap/flower/pull/4439), " +"[#4348](https://github.com/adap/flower/pull/4348), " +"[#4512](https://github.com/adap/flower/pull/4512), " +"[#4485](https://github.com/adap/flower/pull/4485))" msgstr "" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:88 +#: ../../source/ref-changelog.md:94 #, fuzzy msgid "" -"**Update documentation** " -"([#3972](https://github.com/adap/flower/pull/3972), " -"[#3925](https://github.com/adap/flower/pull/3925), " -"[#4061](https://github.com/adap/flower/pull/4061), " -"[#3984](https://github.com/adap/flower/pull/3984), " -"[#3917](https://github.com/adap/flower/pull/3917), " -"[#3900](https://github.com/adap/flower/pull/3900), " -"[#4066](https://github.com/adap/flower/pull/4066), " -"[#3765](https://github.com/adap/flower/pull/3765), " -"[#4021](https://github.com/adap/flower/pull/4021), " -"[#3906](https://github.com/adap/flower/pull/3906), " -"[#4063](https://github.com/adap/flower/pull/4063), " -"[#4076](https://github.com/adap/flower/pull/4076), " -"[#3920](https://github.com/adap/flower/pull/3920), " -"[#3916](https://github.com/adap/flower/pull/3916))" +"**Support NumPy `2.0`** " +"([#4440](https://github.com/adap/flower/pull/4440))" msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " 
-"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:90 +#: ../../source/ref-changelog.md:96 +#, fuzzy msgid "" -"Many parts of the documentation, including the main tutorial, have been " -"migrated to show new Flower APIs and other new Flower features like the " -"improved Docker support." +"**Update infrastructure and CI/CD** " +"([#4466](https://github.com/adap/flower/pull/4466), " +"[#4419](https://github.com/adap/flower/pull/4419), " +"[#4338](https://github.com/adap/flower/pull/4338), " +"[#4334](https://github.com/adap/flower/pull/4334), " +"[#4456](https://github.com/adap/flower/pull/4456), " +"[#4446](https://github.com/adap/flower/pull/4446), " +"[#4415](https://github.com/adap/flower/pull/4415))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:92 +#: ../../source/ref-changelog.md:98 +#, fuzzy msgid "" -"**Migrate code example to use new Flower APIs** " -"([#3758](https://github.com/adap/flower/pull/3758), " -"[#3701](https://github.com/adap/flower/pull/3701), " -"[#3919](https://github.com/adap/flower/pull/3919), " -"[#3918](https://github.com/adap/flower/pull/3918), " -"[#3934](https://github.com/adap/flower/pull/3934), " -"[#3893](https://github.com/adap/flower/pull/3893), " -"[#3833](https://github.com/adap/flower/pull/3833), " -"[#3922](https://github.com/adap/flower/pull/3922), " 
-"[#3846](https://github.com/adap/flower/pull/3846), " -"[#3777](https://github.com/adap/flower/pull/3777), " -"[#3874](https://github.com/adap/flower/pull/3874), " -"[#3873](https://github.com/adap/flower/pull/3873), " -"[#3935](https://github.com/adap/flower/pull/3935), " -"[#3754](https://github.com/adap/flower/pull/3754), " -"[#3980](https://github.com/adap/flower/pull/3980), " -"[#4089](https://github.com/adap/flower/pull/4089), " -"[#4046](https://github.com/adap/flower/pull/4046), " -"[#3314](https://github.com/adap/flower/pull/3314), " -"[#3316](https://github.com/adap/flower/pull/3316), " -"[#3295](https://github.com/adap/flower/pull/3295), " -"[#3313](https://github.com/adap/flower/pull/3313))" -msgstr "" - -#: ../../source/ref-changelog.md:94 -msgid "Many code examples have been migrated to use new Flower APIs." +"**Bugfixes** ([#4404](https://github.com/adap/flower/pull/4404), " +"[#4518](https://github.com/adap/flower/pull/4518), " +"[#4452](https://github.com/adap/flower/pull/4452), " +"[#4376](https://github.com/adap/flower/pull/4376), " +"[#4493](https://github.com/adap/flower/pull/4493), " +"[#4436](https://github.com/adap/flower/pull/4436), " +"[#4410](https://github.com/adap/flower/pull/4410), " +"[#4442](https://github.com/adap/flower/pull/4442), " +"[#4375](https://github.com/adap/flower/pull/4375), " +"[#4515](https://github.com/adap/flower/pull/4515))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:96 +#: ../../source/ref-changelog.md:100 msgid "" -"**Update Flower framework, framework internals and quality " -"infrastructure** 
([#4018](https://github.com/adap/flower/pull/4018), " -"[#4053](https://github.com/adap/flower/pull/4053), " -"[#4098](https://github.com/adap/flower/pull/4098), " -"[#4067](https://github.com/adap/flower/pull/4067), " -"[#4105](https://github.com/adap/flower/pull/4105), " -"[#4048](https://github.com/adap/flower/pull/4048), " -"[#4107](https://github.com/adap/flower/pull/4107), " -"[#4069](https://github.com/adap/flower/pull/4069), " -"[#3915](https://github.com/adap/flower/pull/3915), " -"[#4101](https://github.com/adap/flower/pull/4101), " -"[#4108](https://github.com/adap/flower/pull/4108), " -"[#3914](https://github.com/adap/flower/pull/3914), " -"[#4068](https://github.com/adap/flower/pull/4068), " -"[#4041](https://github.com/adap/flower/pull/4041), " -"[#4040](https://github.com/adap/flower/pull/4040), " -"[#3986](https://github.com/adap/flower/pull/3986), " -"[#4026](https://github.com/adap/flower/pull/4026), " -"[#3961](https://github.com/adap/flower/pull/3961), " -"[#3975](https://github.com/adap/flower/pull/3975), " -"[#3983](https://github.com/adap/flower/pull/3983), " -"[#4091](https://github.com/adap/flower/pull/4091), " -"[#3982](https://github.com/adap/flower/pull/3982), " -"[#4079](https://github.com/adap/flower/pull/4079), " -"[#4073](https://github.com/adap/flower/pull/4073), " -"[#4060](https://github.com/adap/flower/pull/4060), " -"[#4106](https://github.com/adap/flower/pull/4106), " -"[#4080](https://github.com/adap/flower/pull/4080), " -"[#3974](https://github.com/adap/flower/pull/3974), " -"[#3996](https://github.com/adap/flower/pull/3996), " -"[#3991](https://github.com/adap/flower/pull/3991), " -"[#3981](https://github.com/adap/flower/pull/3981), " -"[#4093](https://github.com/adap/flower/pull/4093), " -"[#4100](https://github.com/adap/flower/pull/4100), " -"[#3939](https://github.com/adap/flower/pull/3939), " -"[#3955](https://github.com/adap/flower/pull/3955), " -"[#3940](https://github.com/adap/flower/pull/3940), " 
-"[#4038](https://github.com/adap/flower/pull/4038))" -msgstr "" +"**General improvements** " +"([#4454](https://github.com/adap/flower/pull/4454), " +"[#4365](https://github.com/adap/flower/pull/4365), " +"[#4423](https://github.com/adap/flower/pull/4423), " +"[#4516](https://github.com/adap/flower/pull/4516), " +"[#4509](https://github.com/adap/flower/pull/4509), " +"[#4498](https://github.com/adap/flower/pull/4498), " +"[#4371](https://github.com/adap/flower/pull/4371), " +"[#4449](https://github.com/adap/flower/pull/4449), " +"[#4488](https://github.com/adap/flower/pull/4488), " +"[#4478](https://github.com/adap/flower/pull/4478), " +"[#4392](https://github.com/adap/flower/pull/4392), " +"[#4483](https://github.com/adap/flower/pull/4483), " +"[#4517](https://github.com/adap/flower/pull/4517), " +"[#4330](https://github.com/adap/flower/pull/4330), " +"[#4458](https://github.com/adap/flower/pull/4458), " +"[#4347](https://github.com/adap/flower/pull/4347), " +"[#4429](https://github.com/adap/flower/pull/4429), " +"[#4463](https://github.com/adap/flower/pull/4463), " +"[#4496](https://github.com/adap/flower/pull/4496), " +"[#4508](https://github.com/adap/flower/pull/4508), " +"[#4444](https://github.com/adap/flower/pull/4444), " +"[#4417](https://github.com/adap/flower/pull/4417), " +"[#4504](https://github.com/adap/flower/pull/4504), " +"[#4418](https://github.com/adap/flower/pull/4418), " +"[#4480](https://github.com/adap/flower/pull/4480), " +"[#4455](https://github.com/adap/flower/pull/4455), " +"[#4468](https://github.com/adap/flower/pull/4468), " +"[#4385](https://github.com/adap/flower/pull/4385), " +"[#4487](https://github.com/adap/flower/pull/4487), " +"[#4393](https://github.com/adap/flower/pull/4393), " +"[#4489](https://github.com/adap/flower/pull/4489), " +"[#4389](https://github.com/adap/flower/pull/4389), " +"[#4507](https://github.com/adap/flower/pull/4507), " +"[#4469](https://github.com/adap/flower/pull/4469), " 
+"[#4340](https://github.com/adap/flower/pull/4340), " +"[#4353](https://github.com/adap/flower/pull/4353), " +"[#4494](https://github.com/adap/flower/pull/4494), " +"[#4461](https://github.com/adap/flower/pull/4461), " +"[#4362](https://github.com/adap/flower/pull/4362), " +"[#4473](https://github.com/adap/flower/pull/4473), " +"[#4405](https://github.com/adap/flower/pull/4405), " +"[#4416](https://github.com/adap/flower/pull/4416), " +"[#4453](https://github.com/adap/flower/pull/4453), " +"[#4491](https://github.com/adap/flower/pull/4491), " +"[#4539](https://github.com/adap/flower/pull/4539), " +"[#4542](https://github.com/adap/flower/pull/4542), " +"[#4538](https://github.com/adap/flower/pull/4538), " +"[#4543](https://github.com/adap/flower/pull/4543), " +"[#4541](https://github.com/adap/flower/pull/4541), " +"[#4550](https://github.com/adap/flower/pull/4550), " +"[#4481](https://github.com/adap/flower/pull/4481))" +msgstr "" + +#: ../../source/ref-changelog.md:104 ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:420 ../../source/ref-changelog.md:512 +#: ../../source/ref-changelog.md:1495 +msgid "Deprecations" +msgstr "Dépréciations" + +#: ../../source/ref-changelog.md:106 +#, fuzzy +msgid "**Deprecate Python 3.9**" +msgstr "**Créer le PR**" -#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:108 msgid "" -"As always, many parts of the Flower framework and quality infrastructure " -"were improved and updated." +"Flower is deprecating support for Python 3.9 as several of its " +"dependencies are phasing out compatibility with this version. While no " +"immediate changes have been made, users are encouraged to plan for " +"upgrading to a supported Python version." 
msgstr "" -#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 -#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 -msgid "Deprecations" -msgstr "Dépréciations" +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:200 +#: ../../source/ref-changelog.md:234 ../../source/ref-changelog.md:314 +#: ../../source/ref-changelog.md:430 ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:600 ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:787 ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:941 ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:1068 ../../source/ref-changelog.md:1130 +#: ../../source/ref-changelog.md:1149 ../../source/ref-changelog.md:1305 +#: ../../source/ref-changelog.md:1376 ../../source/ref-changelog.md:1413 +#: ../../source/ref-changelog.md:1456 +msgid "Incompatible changes" +msgstr "Changements incompatibles" -#: ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:112 #, fuzzy msgid "" -"**Deprecate accessing `Context` via `Client.context`** " -"([#3797](https://github.com/adap/flower/pull/3797))" +"**Remove `flower-superexec` command** " +"([#4351](https://github.com/adap/flower/pull/4351))" msgstr "" -"**Supprimer les installations supplémentaires no-op dépréciées** " -"([#973](https://github.com/adap/flower/pull/973))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:114 msgid "" -"Now that both `client_fn` and `server_fn` receive a `Context` object, " -"accessing `Context` via `Client.context` is deprecated. `Client.context` " -"will be removed in a future release. 
If you need to access `Context` in " -"your `Client` implementation, pass it manually when creating the `Client`" -" instance in `client_fn`:" +"The `flower-superexec` command, previously used to launch SuperExec, is " +"no longer functional as SuperExec has been merged into SuperLink. " +"Starting an additional SuperExec is no longer necessary when SuperLink is" +" initiated." msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:116 #, fuzzy msgid "" -"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" -" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " -"[#4077](https://github.com/adap/flower/pull/4077), " -"[#3850](https://github.com/adap/flower/pull/3850))" +"**Remove `flower-server-app` command** " +"([#4490](https://github.com/adap/flower/pull/4490))" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:118 msgid "" -"The CLI commands `flower-supernode` and `flower-server-app` now accept an" -" app directory as argument (instead of references to a `ClientApp` or " -"`ServerApp`). An app directory is any directory containing a " -"`pyproject.toml` file (with the appropriate Flower config fields set). " -"The easiest way to generate a compatible project structure is to use " -"`flwr new`." +"The `flower-server-app` command has been removed. To start a Flower app, " +"please use the `flwr run` command instead." 
msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:120 #, fuzzy msgid "" -"**Disable** `flower-client-app` **CLI command** " -"([#4022](https://github.com/adap/flower/pull/4022))" +"**Remove `app` argument from `flower-supernode` command** " +"([#4497](https://github.com/adap/flower/pull/4497))" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:119 -msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." +#: ../../source/ref-changelog.md:122 +msgid "" +"The usage of `flower-supernode ` has been removed. SuperNode " +"will now load the FAB delivered by SuperLink, and it is no longer " +"possible to directly specify an app directory." msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:124 #, fuzzy msgid "" -"**Use spaces instead of commas for separating config args** " -"([#4000](https://github.com/adap/flower/pull/4000))" +"**Remove support for non-app simulations** " +"([#4431](https://github.com/adap/flower/pull/4431))" msgstr "" -"**Métriques personnalisées pour le serveur et les stratégies** " -"([#717](https://github.com/adap/flower/pull/717))" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:126 msgid "" -"When passing configs (run config, node config) to Flower, you now need to" -" separate key-value pairs using spaces instead of commas. For example:" +"The simulation engine (via `flower-simulation`) now exclusively supports " +"passing an app." 
msgstr "" -#: ../../source/ref-changelog.md:129 -msgid "Previously, you could pass configs using commas, like this:" +#: ../../source/ref-changelog.md:128 +#, fuzzy +msgid "" +"**Rename CLI arguments for `flower-superlink` command** " +"([#4412](https://github.com/adap/flower/pull/4412))" +msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" + +#: ../../source/ref-changelog.md:130 +msgid "" +"The `--driver-api-address` argument has been renamed to `--serverappio-" +"api-address` in the `flower-superlink` command to reflect the renaming of" +" the `Driver` service to the `ServerAppIo` service." msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:132 #, fuzzy msgid "" -"**Remove** `flwr example` **CLI command** " -"([#4084](https://github.com/adap/flower/pull/4084))" +"**Rename CLI arguments for `flwr-serverapp` and `flwr-clientapp` " +"commands** ([#4495](https://github.com/adap/flower/pull/4495))" msgstr "" "**Supprimez KerasClient** " "([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:134 msgid "" -"The experimental `flwr example` CLI command has been removed. Use `flwr " -"new` to generate a project and then run it using `flwr run`." +"The CLI arguments have been renamed for clarity and consistency. " +"Specifically, `--superlink` for `flwr-serverapp` is now `--serverappio-" +"api-address`, and `--supernode` for `flwr-clientapp` is now " +"`--clientappio-api-address`." msgstr "" -#: ../../source/ref-changelog.md:139 +#: ../../source/ref-changelog.md:136 #, fuzzy -msgid "v1.10.0 (2024-07-24)" -msgstr "v1.0.0 (2022-07-28)" +msgid "v1.12.0 (2024-10-14)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:142 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " -"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " -"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " +"`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel" +" J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, " +"`Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan " +"Gao`, `xiliguguagua` " msgstr "" -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:146 #, fuzzy msgid "" -"**Introduce** `flwr run` **(beta)** " -"([#3810](https://github.com/adap/flower/pull/3810), " -"[#3826](https://github.com/adap/flower/pull/3826), " -"[#3880](https://github.com/adap/flower/pull/3880), " -"[#3807](https://github.com/adap/flower/pull/3807), " -"[#3800](https://github.com/adap/flower/pull/3800), " -"[#3814](https://github.com/adap/flower/pull/3814), " -"[#3811](https://github.com/adap/flower/pull/3811), " -"[#3809](https://github.com/adap/flower/pull/3809), " -"[#3819](https://github.com/adap/flower/pull/3819))" +"**Introduce SuperExec log streaming** " +"([#3577](https://github.com/adap/flower/pull/3577), " +"[#3584](https://github.com/adap/flower/pull/3584), " +"[#4242](https://github.com/adap/flower/pull/4242), " +"[#3611](https://github.com/adap/flower/pull/3611), " +"[#3613](https://github.com/adap/flower/pull/3613))" +msgstr "" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" + +#: ../../source/ref-changelog.md:148 +msgid "" +"Flower now supports log streaming from a remote SuperExec using the `flwr" +" log` command. This new feature allows you to monitor logs from SuperExec" +" in real time via `flwr log ` (or `flwr log " +"`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:150 +#, fuzzy +msgid "" +"**Improve `flwr new` templates** " +"([#4291](https://github.com/adap/flower/pull/4291), " +"[#4292](https://github.com/adap/flower/pull/4292), " +"[#4293](https://github.com/adap/flower/pull/4293), " +"[#4294](https://github.com/adap/flower/pull/4294), " +"[#4295](https://github.com/adap/flower/pull/4295))" msgstr "" "**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," " [#1470](https://github.com/adap/flower/pull/1470), " @@ -17830,126 +17459,179 @@ msgstr "" "[#1474](https://github.com/adap/flower/pull/1474), " "[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:152 msgid "" -"Flower 1.10 ships the first beta release of the new `flwr run` command. " -"`flwr run` can run different projects using `flwr run path/to/project`, " -"it enables you to easily switch between different federations using `flwr" -" run . federation` and it runs your Flower project using either local " -"simulation or the new (experimental) SuperExec service. This allows " -"Flower to scale federatated learning from fast local simulation to large-" -"scale production deployment, seamlessly. All projects generated with " -"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " -"`flwr new` to generate a project and then run it using `flwr run`." +"The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and " +"PyTorch have been updated to improve usability and consistency across " +"frameworks." 
msgstr "" -#: ../../source/ref-changelog.md:153 +#: ../../source/ref-changelog.md:154 #, fuzzy msgid "" -"**Introduce run config** " -"([#3751](https://github.com/adap/flower/pull/3751), " -"[#3750](https://github.com/adap/flower/pull/3750), " -"[#3845](https://github.com/adap/flower/pull/3845), " -"[#3824](https://github.com/adap/flower/pull/3824), " -"[#3746](https://github.com/adap/flower/pull/3746), " -"[#3728](https://github.com/adap/flower/pull/3728), " -"[#3730](https://github.com/adap/flower/pull/3730), " -"[#3725](https://github.com/adap/flower/pull/3725), " -"[#3729](https://github.com/adap/flower/pull/3729), " -"[#3580](https://github.com/adap/flower/pull/3580), " -"[#3578](https://github.com/adap/flower/pull/3578), " -"[#3576](https://github.com/adap/flower/pull/3576), " -"[#3798](https://github.com/adap/flower/pull/3798), " -"[#3732](https://github.com/adap/flower/pull/3732), " -"[#3815](https://github.com/adap/flower/pull/3815))" +"**Migrate ID handling to use unsigned 64-bit integers** " +"([#4170](https://github.com/adap/flower/pull/4170), " +"[#4237](https://github.com/adap/flower/pull/4237), " +"[#4243](https://github.com/adap/flower/pull/4243))" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:156 msgid "" -"The new run config feature allows you to run your Flower project in " 
-"different configurations without having to change a single line of code. " -"You can now build a configurable `ServerApp` and `ClientApp` that read " -"configuration values at runtime. This enables you to specify config " -"values like `learning-rate=0.01` in `pyproject.toml` (under the " -"`[tool.flwr.app.config]` key). These config values can then be easily " -"overridden via `flwr run --run-config learning-rate=0.02`, and read from " -"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " -"new project using `flwr new` to see run config in action." +"Node IDs, run IDs, and related fields have been migrated from signed " +"64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To " +"support this change, the `uint64` type is fully supported in all " +"communications. You may now use `uint64` values in config and metric " +"dictionaries. For Python users, that means using `int` values larger than" +" the maximum value of `sint64` but less than the maximum value of " +"`uint64`." 
msgstr "" -#: ../../source/ref-changelog.md:157 +#: ../../source/ref-changelog.md:158 #, fuzzy msgid "" -"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " -"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " -"[#3697](https://github.com/adap/flower/pull/3697), " -"[#3694](https://github.com/adap/flower/pull/3694), " -"[#3696](https://github.com/adap/flower/pull/3696))" +"**Add Flower architecture explanation** " +"([#3270](https://github.com/adap/flower/pull/3270))" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:160 msgid "" -"The `client_fn` signature has been generalized to `client_fn(context: " -"Context) -> Client`. It now receives a `Context` object instead of the " -"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " -"`node_config` and `run_config`, among other things. This enables you to " -"build a configurable `ClientApp` that leverages the new run config " -"system." +"A new [Flower architecture explainer](https://flower.ai/docs/framework" +"/explanation-flower-architecture.html) page introduces Flower components " +"step-by-step. Check out the `EXPLANATIONS` section of the Flower " +"documentation if you're interested." msgstr "" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:162 +#, fuzzy msgid "" -"The previous signature `client_fn(cid: str)` is now deprecated and " -"support for it will be removed in a future release. Use " -"`client_fn(context: Context) -> Client` everywhere." 
+"**Introduce FedRep baseline** " +"([#3790](https://github.com/adap/flower/pull/3790))" +msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" + +#: ../../source/ref-changelog.md:164 +msgid "" +"FedRep is a federated learning algorithm that learns shared data " +"representations across clients while allowing each to maintain " +"personalized local models, balancing collaboration and individual " +"adaptation. Read all the details in the paper: \"Exploiting Shared " +"Representations for Personalized Federated Learning\" " +"([arxiv](https://arxiv.org/abs/2102.07078))" +msgstr "" + +#: ../../source/ref-changelog.md:166 +#, fuzzy +msgid "" +"**Improve FlowerTune template and LLM evaluation pipelines** " +"([#4286](https://github.com/adap/flower/pull/4286), " +"[#3769](https://github.com/adap/flower/pull/3769), " +"[#4272](https://github.com/adap/flower/pull/4272), " +"[#4257](https://github.com/adap/flower/pull/4257), " +"[#4220](https://github.com/adap/flower/pull/4220), " +"[#4282](https://github.com/adap/flower/pull/4282), " +"[#4171](https://github.com/adap/flower/pull/4171), " +"[#4228](https://github.com/adap/flower/pull/4228), " +"[#4258](https://github.com/adap/flower/pull/4258), " +"[#4296](https://github.com/adap/flower/pull/4296), " +"[#4287](https://github.com/adap/flower/pull/4287), " +"[#4217](https://github.com/adap/flower/pull/4217), " +"[#4249](https://github.com/adap/flower/pull/4249), " +"[#4324](https://github.com/adap/flower/pull/4324), " +"[#4219](https://github.com/adap/flower/pull/4219), " +"[#4327](https://github.com/adap/flower/pull/4327))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " 
+"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" + +#: ../../source/ref-changelog.md:168 +msgid "" +"Refined evaluation pipelines, metrics, and documentation for the upcoming" +" FlowerTune LLM Leaderboard across multiple domains including Finance, " +"Medical, and general NLP. Stay tuned for the official launch—we welcome " +"all federated learning and LLM enthusiasts to participate in this " +"exciting challenge!" msgstr "" -#: ../../source/ref-changelog.md:163 +#: ../../source/ref-changelog.md:170 #, fuzzy msgid "" -"**Introduce new** `server_fn(context)` " -"([#3773](https://github.com/adap/flower/pull/3773), " -"[#3796](https://github.com/adap/flower/pull/3796), " -"[#3771](https://github.com/adap/flower/pull/3771))" +"**Enhance Docker Support and Documentation** " +"([#4191](https://github.com/adap/flower/pull/4191), " +"[#4251](https://github.com/adap/flower/pull/4251), " +"[#4190](https://github.com/adap/flower/pull/4190), " +"[#3928](https://github.com/adap/flower/pull/3928), " +"[#4298](https://github.com/adap/flower/pull/4298), " +"[#4192](https://github.com/adap/flower/pull/4192), " +"[#4136](https://github.com/adap/flower/pull/4136), " +"[#4187](https://github.com/adap/flower/pull/4187), " +"[#4261](https://github.com/adap/flower/pull/4261), " +"[#4177](https://github.com/adap/flower/pull/4177), " +"[#4176](https://github.com/adap/flower/pull/4176), " +"[#4189](https://github.com/adap/flower/pull/4189), " +"[#4297](https://github.com/adap/flower/pull/4297), " +"[#4226](https://github.com/adap/flower/pull/4226))" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" 
+"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:165 +#: ../../source/ref-changelog.md:172 msgid "" -"In addition to the new `client_fn(context:Context)`, a new " -"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" -" `ServerApp` (instead of passing, for example, `Strategy`, directly). " -"This enables you to leverage the full `Context` on the server-side to " -"build a configurable `ServerApp`." +"Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images," +" and comprehensively updated [Docker " +"documentation](https://flower.ai/docs/framework/docker/index.html) " +"including quickstart guides and distributed Docker Compose instructions." 
msgstr "" -#: ../../source/ref-changelog.md:167 +#: ../../source/ref-changelog.md:174 #, fuzzy msgid "" -"**Relaunch all** `flwr new` **templates** " -"([#3877](https://github.com/adap/flower/pull/3877), " -"[#3821](https://github.com/adap/flower/pull/3821), " -"[#3587](https://github.com/adap/flower/pull/3587), " -"[#3795](https://github.com/adap/flower/pull/3795), " -"[#3875](https://github.com/adap/flower/pull/3875), " -"[#3859](https://github.com/adap/flower/pull/3859), " -"[#3760](https://github.com/adap/flower/pull/3760))" +"**Introduce Flower glossary** " +"([#4165](https://github.com/adap/flower/pull/4165), " +"[#4235](https://github.com/adap/flower/pull/4235))" +msgstr "" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" + +#: ../../source/ref-changelog.md:176 +msgid "" +"Added the [Federated Learning glossary](https://flower.ai/glossary/) to " +"the Flower repository, located under the `flower/glossary/` directory. " +"This resource aims to provide clear definitions and explanations of key " +"FL concepts. Community contributions are highly welcomed to help expand " +"and refine this knowledge base — this is probably the easiest way to " +"become a Flower contributor!" 
+msgstr "" + +#: ../../source/ref-changelog.md:178 +#, fuzzy +msgid "" +"**Implement Message Time-to-Live (TTL)** " +"([#3620](https://github.com/adap/flower/pull/3620), " +"[#3596](https://github.com/adap/flower/pull/3596), " +"[#3615](https://github.com/adap/flower/pull/3615), " +"[#3609](https://github.com/adap/flower/pull/3609), " +"[#3635](https://github.com/adap/flower/pull/3635))" msgstr "" "**Mise à jour de la documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -17960,476 +17642,496 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:169 +#: ../../source/ref-changelog.md:180 msgid "" -"All `flwr new` templates have been significantly updated to showcase new " -"Flower features and best practices. This includes using `flwr run` and " -"the new run config feature. You can now easily create a new project using" -" `flwr new` and, after following the instructions to install it, `flwr " -"run` it." +"Added comprehensive TTL support for messages in Flower's SuperLink. " +"Messages are now automatically expired and cleaned up based on " +"configurable TTL values, available through the low-level API (and used by" +" default in the high-level API)." 
msgstr "" -#: ../../source/ref-changelog.md:171 +#: ../../source/ref-changelog.md:182 #, fuzzy msgid "" -"**Introduce** `flower-supernode` **(preview)** " -"([#3353](https://github.com/adap/flower/pull/3353))" +"**Improve FAB handling** " +"([#4303](https://github.com/adap/flower/pull/4303), " +"[#4264](https://github.com/adap/flower/pull/4264), " +"[#4305](https://github.com/adap/flower/pull/4305), " +"[#4304](https://github.com/adap/flower/pull/4304))" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:184 msgid "" -"The new `flower-supernode` CLI is here to replace `flower-client-app`. " -"`flower-supernode` brings full multi-app support to the Flower client-" -"side. It also allows to pass `--node-config` to the SuperNode, which is " -"accessible in your `ClientApp` via `Context` (using the new " -"`client_fn(context: Context)` signature)." +"An 8-character hash is now appended to the FAB file name. The `flwr " +"install` command installs FABs with a more flattened folder structure, " +"reducing it from 3 levels to 1." 
msgstr "" -#: ../../source/ref-changelog.md:175 +#: ../../source/ref-changelog.md:186 #, fuzzy msgid "" -"**Introduce node config** " -"([#3782](https://github.com/adap/flower/pull/3782), " -"[#3780](https://github.com/adap/flower/pull/3780), " -"[#3695](https://github.com/adap/flower/pull/3695), " -"[#3886](https://github.com/adap/flower/pull/3886))" +"**Update documentation** " +"([#3341](https://github.com/adap/flower/pull/3341), " +"[#3338](https://github.com/adap/flower/pull/3338), " +"[#3927](https://github.com/adap/flower/pull/3927), " +"[#4152](https://github.com/adap/flower/pull/4152), " +"[#4151](https://github.com/adap/flower/pull/4151), " +"[#3993](https://github.com/adap/flower/pull/3993))" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:177 +#: ../../source/ref-changelog.md:188 msgid "" -"A new node config feature allows you to pass a static configuration to " -"the SuperNode. This configuration is read-only and available to every " -"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " -"config via `Context` (`context.node_config`)." +"Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging " +"Face, Fastai) to use the new `flwr run` command and removed default title" +" from documentation base template. A new blockchain example has been " +"added to FAQ." 
msgstr "" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:190 +#, fuzzy msgid "" -"**Introduce SuperExec (experimental)** " -"([#3605](https://github.com/adap/flower/pull/3605), " -"[#3723](https://github.com/adap/flower/pull/3723), " -"[#3731](https://github.com/adap/flower/pull/3731), " -"[#3589](https://github.com/adap/flower/pull/3589), " -"[#3604](https://github.com/adap/flower/pull/3604), " -"[#3622](https://github.com/adap/flower/pull/3622), " -"[#3838](https://github.com/adap/flower/pull/3838), " -"[#3720](https://github.com/adap/flower/pull/3720), " -"[#3606](https://github.com/adap/flower/pull/3606), " -"[#3602](https://github.com/adap/flower/pull/3602), " -"[#3603](https://github.com/adap/flower/pull/3603), " -"[#3555](https://github.com/adap/flower/pull/3555), " -"[#3808](https://github.com/adap/flower/pull/3808), " -"[#3724](https://github.com/adap/flower/pull/3724), " -"[#3658](https://github.com/adap/flower/pull/3658), " -"[#3629](https://github.com/adap/flower/pull/3629))" +"**Update example projects** " +"([#3716](https://github.com/adap/flower/pull/3716), " +"[#4007](https://github.com/adap/flower/pull/4007), " +"[#4130](https://github.com/adap/flower/pull/4130), " +"[#4234](https://github.com/adap/flower/pull/4234), " +"[#4206](https://github.com/adap/flower/pull/4206), " +"[#4188](https://github.com/adap/flower/pull/4188), " +"[#4247](https://github.com/adap/flower/pull/4247), " +"[#4331](https://github.com/adap/flower/pull/4331))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:181 +#: 
../../source/ref-changelog.md:192 msgid "" -"This is the first experimental release of Flower SuperExec, a new service" -" that executes your runs. It's not ready for production deployment just " -"yet, but don't hesitate to give it a try if you're interested." +"Refreshed multiple example projects including vertical FL, PyTorch " +"(advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized " +"Hugging Face quickstart with a smaller language model and removed legacy " +"simulation examples." msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:194 #, fuzzy msgid "" -"**Add new federated learning with tabular data example** " -"([#3568](https://github.com/adap/flower/pull/3568))" +"**Update translations** " +"([#4070](https://github.com/adap/flower/pull/4070), " +"[#4316](https://github.com/adap/flower/pull/4316), " +"[#4252](https://github.com/adap/flower/pull/4252), " +"[#4256](https://github.com/adap/flower/pull/4256), " +"[#4210](https://github.com/adap/flower/pull/4210), " +"[#4263](https://github.com/adap/flower/pull/4263), " +"[#4259](https://github.com/adap/flower/pull/4259))" msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:196 msgid "" -"A new code example exemplifies a federated learning setup using the " -"Flower framework on the Adult Census Income tabular dataset." 
+"**General improvements** " +"([#4239](https://github.com/adap/flower/pull/4239), " +"[4276](https://github.com/adap/flower/pull/4276), " +"[4204](https://github.com/adap/flower/pull/4204), " +"[4184](https://github.com/adap/flower/pull/4184), " +"[4227](https://github.com/adap/flower/pull/4227), " +"[4183](https://github.com/adap/flower/pull/4183), " +"[4202](https://github.com/adap/flower/pull/4202), " +"[4250](https://github.com/adap/flower/pull/4250), " +"[4267](https://github.com/adap/flower/pull/4267), " +"[4246](https://github.com/adap/flower/pull/4246), " +"[4240](https://github.com/adap/flower/pull/4240), " +"[4265](https://github.com/adap/flower/pull/4265), " +"[4238](https://github.com/adap/flower/pull/4238), " +"[4275](https://github.com/adap/flower/pull/4275), " +"[4318](https://github.com/adap/flower/pull/4318), " +"[#4178](https://github.com/adap/flower/pull/4178), " +"[#4315](https://github.com/adap/flower/pull/4315), " +"[#4241](https://github.com/adap/flower/pull/4241), " +"[#4289](https://github.com/adap/flower/pull/4289), " +"[#4290](https://github.com/adap/flower/pull/4290), " +"[#4181](https://github.com/adap/flower/pull/4181), " +"[#4208](https://github.com/adap/flower/pull/4208), " +"[#4225](https://github.com/adap/flower/pull/4225), " +"[#4314](https://github.com/adap/flower/pull/4314), " +"[#4174](https://github.com/adap/flower/pull/4174), " +"[#4203](https://github.com/adap/flower/pull/4203), " +"[#4274](https://github.com/adap/flower/pull/4274), " +"[#3154](https://github.com/adap/flower/pull/3154), " +"[#4201](https://github.com/adap/flower/pull/4201), " +"[#4268](https://github.com/adap/flower/pull/4268), " +"[#4254](https://github.com/adap/flower/pull/4254), " +"[#3990](https://github.com/adap/flower/pull/3990), " +"[#4212](https://github.com/adap/flower/pull/4212), " +"[#2938](https://github.com/adap/flower/pull/2938), " +"[#4205](https://github.com/adap/flower/pull/4205), " +"[#4222](https://github.com/adap/flower/pull/4222), " 
+"[#4313](https://github.com/adap/flower/pull/4313), " +"[#3936](https://github.com/adap/flower/pull/3936), " +"[#4278](https://github.com/adap/flower/pull/4278), " +"[#4319](https://github.com/adap/flower/pull/4319), " +"[#4332](https://github.com/adap/flower/pull/4332), " +"[#4333](https://github.com/adap/flower/pull/4333))" +msgstr "" + +#: ../../source/ref-changelog.md:202 +#, fuzzy +msgid "" +"**Drop Python 3.8 support and update minimum version to 3.9** " +"([#4180](https://github.com/adap/flower/pull/4180), " +"[#4213](https://github.com/adap/flower/pull/4213), " +"[#4193](https://github.com/adap/flower/pull/4193), " +"[#4199](https://github.com/adap/flower/pull/4199), " +"[#4196](https://github.com/adap/flower/pull/4196), " +"[#4195](https://github.com/adap/flower/pull/4195), " +"[#4198](https://github.com/adap/flower/pull/4198), " +"[#4194](https://github.com/adap/flower/pull/4194))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:187 -#, fuzzy +#: ../../source/ref-changelog.md:204 msgid "" -"**Create generic adapter layer (preview)** " -"([#3538](https://github.com/adap/flower/pull/3538), " -"[#3536](https://github.com/adap/flower/pull/3536), " -"[#3540](https://github.com/adap/flower/pull/3540))" +"Python 3.8 support was deprecated in Flower 1.9, and this release removes" +" support. Flower now requires Python 3.9 or later (Python 3.11 is " +"recommended). CI and documentation were updated to use Python 3.9 as the " +"minimum supported version. Flower now supports Python 3.9 to 3.12." 
msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:206 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:212 msgid "" -"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" -" with Flower in a transparent way. This makes Flower more modular and " -"allows for integration into other federated learning solutions and " -"platforms." +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:214 +#, fuzzy +msgid "Improvements" +msgstr "Améliorations facultatives" + +#: ../../source/ref-changelog.md:216 #, fuzzy msgid "" -"**Refactor Flower Simulation Engine** " -"([#3581](https://github.com/adap/flower/pull/3581), " -"[#3471](https://github.com/adap/flower/pull/3471), " -"[#3804](https://github.com/adap/flower/pull/3804), " -"[#3468](https://github.com/adap/flower/pull/3468), " -"[#3839](https://github.com/adap/flower/pull/3839), " -"[#3806](https://github.com/adap/flower/pull/3806), " -"[#3861](https://github.com/adap/flower/pull/3861), " -"[#3543](https://github.com/adap/flower/pull/3543), " -"[#3472](https://github.com/adap/flower/pull/3472), " -"[#3829](https://github.com/adap/flower/pull/3829), " -"[#3469](https://github.com/adap/flower/pull/3469))" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " 
-"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-changelog.md:218 +#, fuzzy msgid "" -"The Simulation Engine was significantly refactored. This results in " -"faster and more stable simulations. It is also the foundation for " -"upcoming changes that aim to provide the next level of performance and " -"configurability in federated learning simulations." +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-changelog.md:220 #, fuzzy msgid "" -"**Optimize Docker containers** " -"([#3591](https://github.com/adap/flower/pull/3591))" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -"Nouveau thème de documentation " -"([#551](https://github.com/adap/flower/pull/551))" +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:197 +#: ../../source/ref-changelog.md:222 +#, fuzzy msgid "" -"Flower Docker containers were optimized and updated to use that latest " -"Flower framework features." 
+"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" +"**Nouvel exemple de code pour les Transformers à visage embrassant** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:224 #, fuzzy msgid "" -"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " -"[#3789](https://github.com/adap/flower/pull/3789))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:226 +#, fuzzy msgid "" -"Improved logging aims to be more concise and helpful to show you the " -"details you actually care about." 
+"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" +"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:228 #, fuzzy msgid "" -"**Refactor framework internals** " -"([#3621](https://github.com/adap/flower/pull/3621), " -"[#3792](https://github.com/adap/flower/pull/3792), " -"[#3772](https://github.com/adap/flower/pull/3772), " -"[#3805](https://github.com/adap/flower/pull/3805), " -"[#3583](https://github.com/adap/flower/pull/3583), " -"[#3825](https://github.com/adap/flower/pull/3825), " -"[#3597](https://github.com/adap/flower/pull/3597), " -"[#3802](https://github.com/adap/flower/pull/3802), " -"[#3569](https://github.com/adap/flower/pull/3569))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" - -#: ../../source/ref-changelog.md:207 -#, fuzzy -msgid "Documentation improvements" -msgstr "Améliorations facultatives" +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:230 #, fuzzy msgid "" -"**Add 🇰🇷 Korean translations** " -"([#3680](https://github.com/adap/flower/pull/3680))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"**Ouvrir dans le bouton Colab** " -"([#1389](https://github.com/adap/flower/pull/1389))" 
+"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:232 #, fuzzy msgid "" -"**Update translations** " -"([#3586](https://github.com/adap/flower/pull/3586), " -"[#3679](https://github.com/adap/flower/pull/3679), " -"[#3570](https://github.com/adap/flower/pull/3570), " -"[#3681](https://github.com/adap/flower/pull/3681), " -"[#3617](https://github.com/adap/flower/pull/3617), " -"[#3674](https://github.com/adap/flower/pull/3674), " -"[#3671](https://github.com/adap/flower/pull/3671), " -"[#3572](https://github.com/adap/flower/pull/3572), " -"[#3631](https://github.com/adap/flower/pull/3631))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:213 +#: ../../source/ref-changelog.md:238 #, fuzzy -msgid "" -"**Update documentation** " -"([#3864](https://github.com/adap/flower/pull/3864), " -"[#3688](https://github.com/adap/flower/pull/3688), " -"[#3562](https://github.com/adap/flower/pull/3562), " -"[#3641](https://github.com/adap/flower/pull/3641), " -"[#3384](https://github.com/adap/flower/pull/3384), " -"[#3634](https://github.com/adap/flower/pull/3634), " -"[#3823](https://github.com/adap/flower/pull/3823), " -"[#3793](https://github.com/adap/flower/pull/3793), " -"[#3707](https://github.com/adap/flower/pull/3707))" -msgstr "" -"**Mise à jour de la documentation** " 
-"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:244 msgid "" -"Updated documentation includes new install instructions for different " -"shells, a new Flower Code Examples documentation landing page, new `flwr`" -" CLI docs and an updated federated XGBoost code example." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:219 -msgid "**Deprecate** `client_fn(cid: str)`" +#: ../../source/ref-changelog.md:248 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " 
+"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:250 msgid "" -"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " -"This signature is now deprecated. Use the new signature " -"`client_fn(context: Context) -> Client` instead. The new argument " -"`context` allows accessing `node_id`, `node_config`, `run_config` and " -"other `Context` features. When running using the simulation engine (or " -"using `flower-supernode` with a custom `--node-config partition-id=...`)," -" `context.node_config[\"partition-id\"]` will return an `int` partition " -"ID that can be used with Flower Datasets to load a different partition of" -" the dataset on each simulated or deployed SuperNode." +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:223 +#: ../../source/ref-changelog.md:252 msgid "" -"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" -" `ServerApp` **directly**" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. 
This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:225 -msgid "" -"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " -"is now deprecated. Instead of passing " -"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " -"pass them wrapped in a `server_fn(context: Context) -> " -"ServerAppComponents` function, like this: " -"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " -"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " -"to that, `server_fn` allows you to access `Context` (for example, to read" -" the `run_config`)." +#: ../../source/ref-changelog.md:254 +msgid "`flwr run` is all you need." msgstr "" -#: ../../source/ref-changelog.md:229 +#: ../../source/ref-changelog.md:256 #, fuzzy msgid "" -"**Remove support for `client_ids` in `start_simulation`** " -"([#3699](https://github.com/adap/flower/pull/3699))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"**Améliorer la prise en 
charge des GPU dans les simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:231 +#: ../../source/ref-changelog.md:258 msgid "" -"The (rarely used) feature that allowed passing custom `client_ids` to the" -" `start_simulation` function was removed. This removal is part of a " -"bigger effort to refactor the simulation engine and unify how the Flower " -"internals work in simulation and deployment." +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." 
msgstr "" -#: ../../source/ref-changelog.md:233 -#, fuzzy -msgid "" -"**Remove `flower-driver-api` and `flower-fleet-api`** " -"([#3418](https://github.com/adap/flower/pull/3418))" +#: ../../source/ref-changelog.md:260 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:235 +#: ../../source/ref-changelog.md:262 msgid "" -"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" -"api` were removed in an effort to streamline the SuperLink developer " -"experience. Use `flower-superlink` instead." +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: ../../source/ref-changelog.md:237 -#, fuzzy -msgid "v1.9.0 (2024-06-10)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:263 +msgid "" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." +msgstr "" -#: ../../source/ref-changelog.md:243 +#: ../../source/ref-changelog.md:264 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." 
msgstr "" -#: ../../source/ref-changelog.md:247 +#: ../../source/ref-changelog.md:266 #, fuzzy msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " 
+"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:249 +#: ../../source/ref-changelog.md:268 msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" +msgstr "" + +#: ../../source/ref-changelog.md:270 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -#: ../../source/ref-changelog.md:251 +#: ../../source/ref-changelog.md:271 msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." 
msgstr "" -#: ../../source/ref-changelog.md:253 -#, fuzzy +#: ../../source/ref-changelog.md:272 msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:255 +#: ../../source/ref-changelog.md:273 msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." 
+"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: ../../source/ref-changelog.md:257 +#: ../../source/ref-changelog.md:275 #, fuzzy msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:259 +#: ../../source/ref-changelog.md:277 msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." 
+"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -#: ../../source/ref-changelog.md:261 +#: ../../source/ref-changelog.md:279 #, fuzzy msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" "**Mise à jour de la documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -18440,44 +18142,23 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:263 -msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." 
-msgstr "" - -#: ../../source/ref-changelog.md:265 -#, fuzzy -msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" -msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" - -#: ../../source/ref-changelog.md:267 +#: ../../source/ref-changelog.md:281 msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -#: ../../source/ref-changelog.md:269 +#: ../../source/ref-changelog.md:283 #, fuzzy msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" "**Nouvel exemple de code MLCube** " 
"([#779](https://github.com/adap/flower/pull/779), " @@ -18485,497 +18166,526 @@ msgstr "" "[#1065](https://github.com/adap/flower/pull/1065), " "[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:271 +#: ../../source/ref-changelog.md:285 msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" -#: ../../source/ref-changelog.md:273 +#: ../../source/ref-changelog.md:287 #, fuzzy msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " 
+"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:275 +#: ../../source/ref-changelog.md:289 msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" -#: ../../source/ref-changelog.md:277 +#: ../../source/ref-changelog.md:291 #, fuzzy msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " 
-"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:279 +#: ../../source/ref-changelog.md:293 msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
msgstr "" -#: ../../source/ref-changelog.md:281 -#, fuzzy +#: ../../source/ref-changelog.md:295 msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " 
-"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:283 +#: ../../source/ref-changelog.md:297 +msgid "Many code examples have been migrated to use new Flower APIs." +msgstr "" + +#: ../../source/ref-changelog.md:299 msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " 
+"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" msgstr "" -#: ../../source/ref-changelog.md:285 +#: ../../source/ref-changelog.md:305 #, fuzzy msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:287 +#: ../../source/ref-changelog.md:307 msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:289 +#: ../../source/ref-changelog.md:316 +#, fuzzy msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` 
([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:291 +#: ../../source/ref-changelog.md:318 msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:293 +#: ../../source/ref-changelog.md:320 +#, fuzzy msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " 
-"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:295 -msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +#: ../../source/ref-changelog.md:322 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -#: ../../source/ref-changelog.md:297 +#: ../../source/ref-changelog.md:324 +#, fuzzy msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:299 +#: ../../source/ref-changelog.md:326 msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. 
For example:" msgstr "" -#: ../../source/ref-changelog.md:301 -msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +#: ../../source/ref-changelog.md:332 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:338 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" -msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " 
-"[#1307](https://github.com/adap/flower/pull/1307))" - -#: ../../source/ref-changelog.md:305 -msgid "As always, Flower code examples have received many updates." +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:307 +#: ../../source/ref-changelog.md:340 msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " 
-"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:311 +#: ../../source/ref-changelog.md:342 #, fuzzy -msgid "**Deprecate Python 3.8 support**" -msgstr "**Créer le PR**" +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:313 +#: ../../source/ref-changelog.md:348 msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:315 +#: ../../source/ref-changelog.md:352 #, fuzzy msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:317 +#: ../../source/ref-changelog.md:354 msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +"Flower 1.10 ships the first beta release of the new `flwr run` command. 
" +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:319 +#: ../../source/ref-changelog.md:356 #, fuzzy msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" -msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" - -#: ../../source/ref-changelog.md:321 -msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." 
+"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:325 +#: ../../source/ref-changelog.md:358 msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. 
This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-changelog.md:327 +#: ../../source/ref-changelog.md:360 +#, fuzzy msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:329 -#, fuzzy +#: ../../source/ref-changelog.md:362 msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. 
This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:331 +#: ../../source/ref-changelog.md:364 msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: ../../source/ref-changelog.md:333 +#: ../../source/ref-changelog.md:366 #, fuzzy msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:335 +#: ../../source/ref-changelog.md:368 msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). 
" +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:337 +#: ../../source/ref-changelog.md:370 #, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" +msgid "" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:343 +#: ../../source/ref-changelog.md:372 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." 
msgstr "" -#: ../../source/ref-changelog.md:347 +#: ../../source/ref-changelog.md:374 +#, fuzzy msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:349 +#: ../../source/ref-changelog.md:376 msgid "" -"The Flower Next high-level API is stable! 
Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." 
msgstr "" -#: ../../source/ref-changelog.md:351 +#: ../../source/ref-changelog.md:378 #, fuzzy msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:353 +#: ../../source/ref-changelog.md:380 msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." 
-" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." +msgstr "" + +#: ../../source/ref-changelog.md:382 +msgid "" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" +msgstr "" + +#: ../../source/ref-changelog.md:384 +msgid "" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -#: ../../source/ref-changelog.md:355 +#: ../../source/ref-changelog.md:386 #, fuzzy msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:357 +#: ../../source/ref-changelog.md:388 msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." 
msgstr "" -#: ../../source/ref-changelog.md:359 +#: ../../source/ref-changelog.md:390 #, fuzzy msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" +msgstr "" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" + +#: ../../source/ref-changelog.md:392 +msgid "" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
+msgstr "" + +#: ../../source/ref-changelog.md:394 +#, fuzzy +msgid "" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" "**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," " [#1470](https://github.com/adap/flower/pull/1470), " @@ -18984,89 +18694,114 @@ msgstr "" "[#1474](https://github.com/adap/flower/pull/1474), " "[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:361 +#: ../../source/ref-changelog.md:396 msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." 
msgstr "" -#: ../../source/ref-changelog.md:363 +#: ../../source/ref-changelog.md:398 #, fuzzy msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:365 +#: ../../source/ref-changelog.md:400 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+"Flower Docker containers were optimized and updated to use the latest " +"Flower framework features." msgstr "" -#: ../../source/ref-changelog.md:367 +#: ../../source/ref-changelog.md:402 #, fuzzy msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:369 +#: ../../source/ref-changelog.md:404 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about."
msgstr "" -#: ../../source/ref-changelog.md:371 +#: ../../source/ref-changelog.md:406 #, fuzzy msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" +msgstr "" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:410 +#, fuzzy +msgid "Documentation improvements" +msgstr "Améliorations facultatives" + +#: ../../source/ref-changelog.md:412 +#, fuzzy +msgid "" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" +msgstr "" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" + +#: ../../source/ref-changelog.md:414 +#, fuzzy +msgid "" +"**Update translations** " 
+"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" +msgstr "" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:416 +#, fuzzy +msgid "" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" "**Mise à jour de la documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -19077,127 +18812,113 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:373 +#: ../../source/ref-changelog.md:418 msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -#: ../../source/ref-changelog.md:375 -#, fuzzy -msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +#: ../../source/ref-changelog.md:422 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:377 +#: ../../source/ref-changelog.md:424 msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. 
When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: ../../source/ref-changelog.md:379 -#, fuzzy +#: ../../source/ref-changelog.md:426 msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:381 +#: ../../source/ref-changelog.md:428 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -#: ../../source/ref-changelog.md:383 +#: ../../source/ref-changelog.md:432 #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:385 +#: ../../source/ref-changelog.md:434 msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." 
msgstr "" -#: ../../source/ref-changelog.md:387 +#: ../../source/ref-changelog.md:436 +#, fuzzy msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:389 +#: ../../source/ref-changelog.md:438 msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -#: ../../source/ref-changelog.md:391 +#: ../../source/ref-changelog.md:440 #, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:446 msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +msgstr "" + +#: ../../source/ref-changelog.md:450 +#, fuzzy +msgid "" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" "**Documentation mise à 
jour** " "([#1494](https://github.com/adap/flower/pull/1494), " @@ -19210,286 +18931,201 @@ msgstr "" "[#1519](https://github.com/adap/flower/pull/1519), " "[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:393 -msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." -msgstr "" - -#: ../../source/ref-changelog.md:395 +#: ../../source/ref-changelog.md:452 msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " 
-"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " 
-"[#3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +"Flower 1.9 introduces the first built-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: ../../source/ref-changelog.md:401 -#, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:407 +#: ../../source/ref-changelog.md:454 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"This is the first preview release of the Flower-native authentication " +"system. 
Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: ../../source/ref-changelog.md:411 +#: ../../source/ref-changelog.md:456 #, fuzzy msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" -msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" - -#: ../../source/ref-changelog.md:413 -msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." -msgstr "" - -#: ../../source/ref-changelog.md:415 -#, fuzzy -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" -msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" - -#: ../../source/ref-changelog.md:417 -msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. 
The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." -msgstr "" - -#: ../../source/ref-changelog.md:419 -#, fuzzy -msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:421 +#: ../../source/ref-changelog.md:458 msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"Full Flower Next Docker support is here! 
With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get started." msgstr "" -#: ../../source/ref-changelog.md:423 +#: ../../source/ref-changelog.md:460 #, fuzzy msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:425 +#: ../../source/ref-changelog.md:462 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. 
In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -#: ../../source/ref-changelog.md:427 +#: ../../source/ref-changelog.md:464 #, fuzzy msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:429 +#: ../../source/ref-changelog.md:466 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." 
msgstr "" -#: ../../source/ref-changelog.md:431 +#: ../../source/ref-changelog.md:468 #, fuzzy msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" "**Ajouter une nouvelle stratégie `FedProx`** " "([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:433 +#: ../../source/ref-changelog.md:470 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-changelog.md:472 #, fuzzy msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" -msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " 
-"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" - -#: ../../source/ref-changelog.md:437 -msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:439 -#, fuzzy +#: ../../source/ref-changelog.md:474 msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." 
msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:476 #, fuzzy msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:443 -#, fuzzy +#: ../../source/ref-changelog.md:478 msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." 
msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:445 +#: ../../source/ref-changelog.md:480 #, fuzzy msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" "Mettre à jour les outils de développement " "([#1231](https://github.com/adap/flower/pull/1231), " @@ -19497,1127 +19133,1059 @@ msgstr "" "[#1301](https://github.com/adap/flower/pull/1301), " "[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:482 msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
msgstr "" -#: ../../source/ref-changelog.md:449 +#: ../../source/ref-changelog.md:484 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " 
+"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:451 -msgid "Many Flower code examples received substantial updates." +#: ../../source/ref-changelog.md:486 +msgid "" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" -#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 -#, fuzzy -msgid "**Update Flower Baselines**" -msgstr "Demande pour une nouvelle Flower Baseline" - -#: ../../source/ref-changelog.md:455 +#: ../../source/ref-changelog.md:488 #, fuzzy msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:456 -#, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: ../../source/ref-changelog.md:490 +msgid "" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." 
msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:457 -#, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: ../../source/ref-changelog.md:492 +msgid "" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " 
-"([#1619](https://github.com/adap/flower/pull/1619))" - -#: ../../source/ref-changelog.md:458 -#, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" - -#: ../../source/ref-changelog.md:459 -#, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:460 -#, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-changelog.md:494 +msgid "" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:462 -#, fuzzy +#: ../../source/ref-changelog.md:496 msgid "" "**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " 
+"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:464 +#: ../../source/ref-changelog.md:498 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " 
-"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -#: ../../source/ref-changelog.md:466 +#: ../../source/ref-changelog.md:500 msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: ../../source/ref-changelog.md:468 +#: ../../source/ref-changelog.md:502 msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " 
-"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: ../../source/ref-changelog.md:470 +#: ../../source/ref-changelog.md:504 msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " 
-"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: ../../source/ref-changelog.md:474 +#: ../../source/ref-changelog.md:506 #, fuzzy msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" -msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" - -#: ../../source/ref-changelog.md:476 -msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." 
+"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:478 -#, fuzzy -msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +#: ../../source/ref-changelog.md:508 +msgid "As always, Flower code examples have received many updates." 
msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:480 +#: ../../source/ref-changelog.md:510 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " 
+"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -#: ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:514 #, fuzzy -msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" -msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" +msgid "**Deprecate Python 3.8 support**" +msgstr "**Créer le PR**" -#: ../../source/ref-changelog.md:484 -#, fuzzy +#: ../../source/ref-changelog.md:516 msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." 
msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:486 +#: ../../source/ref-changelog.md:518 #, fuzzy msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" "**Rename** `Weights` **to** `NDArrays` " "([#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:488 -msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." -msgstr "" - -#: ../../source/ref-changelog.md:490 -#, fuzzy -msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" - -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-changelog.md:520 msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-changelog.md:522 #, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:500 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:504 -#, fuzzy +#: ../../source/ref-changelog.md:524 msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:506 -#, fuzzy +#: ../../source/ref-changelog.md:528 msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " 
-"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:508 +#: ../../source/ref-changelog.md:530 msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: ../../source/ref-changelog.md:510 +#: ../../source/ref-changelog.md:532 #, fuzzy msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" - -#: ../../source/ref-changelog.md:512 -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:514 -#, fuzzy +#: ../../source/ref-changelog.md:534 msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:516 +#: ../../source/ref-changelog.md:536 #, fuzzy msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:518 -#, fuzzy +#: ../../source/ref-changelog.md:538 msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" -msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" - -#: ../../source/ref-changelog.md:520 -msgid "Add gRPC request-response capability to the Android SDK." +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. 
Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -#: ../../source/ref-changelog.md:522 +#: ../../source/ref-changelog.md:540 #, fuzzy -msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" -msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" - -#: ../../source/ref-changelog.md:524 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "" +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:526 -#, fuzzy +#: ../../source/ref-changelog.md:546 msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:528 +#: ../../source/ref-changelog.md:550 msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. 
The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -#: ../../source/ref-changelog.md:530 +#: ../../source/ref-changelog.md:552 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." 
+"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -#: ../../source/ref-changelog.md:532 +#: ../../source/ref-changelog.md:554 #, fuzzy msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" "**Mettre à jour les exemples de code** " "([#1291](https://github.com/adap/flower/pull/1291), " "[#1286](https://github.com/adap/flower/pull/1286), " "[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:534 +#: ../../source/ref-changelog.md:556 msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." 
+"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -#: ../../source/ref-changelog.md:536 +#: ../../source/ref-changelog.md:558 #, fuzzy msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:538 -#, fuzzy +#: ../../source/ref-changelog.md:560 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"Flower Modifiers (we 
call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:540 +#: ../../source/ref-changelog.md:562 #, fuzzy msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 -#, fuzzy +#: ../../source/ref-changelog.md:564 msgid "" -"**Introduce `WorkloadState`** " 
-"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-changelog.md:566 #, fuzzy msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " 
+"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:550 -#, fuzzy +#: ../../source/ref-changelog.md:568 msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:552 +#: ../../source/ref-changelog.md:570 #, fuzzy msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:554 -#, fuzzy +#: 
../../source/ref-changelog.md:572 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" - -#: ../../source/ref-changelog.md:556 -#, fuzzy -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:558 -#, fuzzy -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" - -#: ../../source/ref-changelog.md:560 -#, fuzzy -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" -msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" - -#: ../../source/ref-changelog.md:562 -#, fuzzy -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" - -#: ../../source/ref-changelog.md:564 -#, fuzzy -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "Nouvelle référence API 
([#554](https://github.com/adap/flower/pull/554))" - -#: ../../source/ref-changelog.md:566 -#, fuzzy -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" - -#: ../../source/ref-changelog.md:568 -#, fuzzy -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" - -#: ../../source/ref-changelog.md:570 +#: ../../source/ref-changelog.md:574 #, fuzzy msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:572 -#, fuzzy +#: ../../source/ref-changelog.md:576 msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " 
-"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:574 +#: ../../source/ref-changelog.md:578 #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " 
+"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:576 -#, fuzzy +#: ../../source/ref-changelog.md:580 msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." 
msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:582 #, fuzzy msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:584 msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " 
-"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" -msgstr "" - -#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 -#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 -#: ../../source/ref-changelog.md:857 -msgid "Flower received many improvements under the hood, too many to list here." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " -"pour être énumérées ici." 
#: ../../source/ref-changelog.md:586 #, fuzzy msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" #: ../../source/ref-changelog.md:588 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." 
msgstr "" #: ../../source/ref-changelog.md:590 -#, fuzzy msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" #: ../../source/ref-changelog.md:592 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" #: ../../source/ref-changelog.md:594 #, fuzzy -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:600 msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:604 -#, fuzzy +#: ../../source/ref-changelog.md:596 msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. 
Many other examples " +"received considerable updates as well." msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:598 msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " 
+"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " 
+"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: ../../source/ref-changelog.md:608 -msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
-msgstr "" +#: ../../source/ref-changelog.md:604 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" #: ../../source/ref-changelog.md:610 msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" -msgstr "" - -#: ../../source/ref-changelog.md:612 -msgid "" -"Much effort went into a completely restructured Flower docs experience. 
" -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" #: ../../source/ref-changelog.md:614 #, fuzzy msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" #: ../../source/ref-changelog.md:616 msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. 
Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" #: ../../source/ref-changelog.md:618 #, fuzzy msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" #: ../../source/ref-changelog.md:620 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." 
msgstr "" #: ../../source/ref-changelog.md:622 #, fuzzy msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" #: ../../source/ref-changelog.md:624 msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with 
existing framework integrations or strategies." +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" #: ../../source/ref-changelog.md:626 #, fuzzy -msgid "**Deprecate Python 3.7**" -msgstr "**Créer le PR**" +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" +msgstr "" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" #: ../../source/ref-changelog.md:628 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" #: ../../source/ref-changelog.md:630 #, fuzzy msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" #: ../../source/ref-changelog.md:632 -#, fuzzy msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." 
+"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." #: ../../source/ref-changelog.md:634 #, fuzzy msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" "**Ajouter une nouvelle stratégie `FedProx`** " "([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:636 msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
msgstr "" #: ../../source/ref-changelog.md:638 #, fuzzy msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" #: ../../source/ref-changelog.md:640 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
msgstr "" #: ../../source/ref-changelog.md:642 #, fuzzy msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" #: ../../source/ref-changelog.md:644 +#, fuzzy msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:646 #, fuzzy msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." 
+"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:648 #, fuzzy msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" #: ../../source/ref-changelog.md:650 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" #: ../../source/ref-changelog.md:652 #, fuzzy msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." 
+"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" #: ../../source/ref-changelog.md:654 -#, fuzzy -msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +msgid "Many Flower code examples received substantial updates." msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:656 -msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. 
" -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." -msgstr "" +#: ../../source/ref-changelog.md:656 ../../source/ref-changelog.md:749 +#, fuzzy +msgid "**Update Flower Baselines**" +msgstr "Demande pour une nouvelle Flower Baseline" #: ../../source/ref-changelog.md:658 #, fuzzy msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:660 -msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +#: ../../source/ref-changelog.md:659 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:662 +#: ../../source/ref-changelog.md:660 #, fuzzy -msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:664 +#: ../../source/ref-changelog.md:661 #, fuzzy -msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
-msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:666 -msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" -msgstr "" +#: ../../source/ref-changelog.md:662 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:668 -msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." 
+#: ../../source/ref-changelog.md:663 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:670 +#: ../../source/ref-changelog.md:665 #, fuzzy msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" "**Mise à jour de la documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -20628,5153 +20196,5067 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:678 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:684 +#: ../../source/ref-changelog.md:667 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:669 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " -"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:671 msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." 
+"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " -"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" et un [exemple de code] " -"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " -"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " -"XGBoost." 
-#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:673 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " 
+"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:677 +#, fuzzy msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " -"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " -"disposons désormais d'un SDK swift iOS présent sous " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" qui facilitera grandement le processus de création d'applications. Pour " -"présenter son utilisation, l'[exemple " -"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " -"été mis à jour !" 
+"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:679 msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" -" \"** ([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:681 #, fuzzy msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -"Un nouveau [tutoriel d'entrée de gamme] " -"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " -"documentation explique les bases de l'apprentissage fédéré. Il permet à " -"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" -" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " -"l'apprentissage fédéré !" 
+"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:683 msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:685 +#, fuzzy msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " -"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " -"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " -"qui vise à rendre la convergence plus robuste dans des contextes " -"hétérogènes." 
+"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:687 +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:689 +#, fuzzy msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"Cette nouvelle ligne de base reproduit une expérience évaluant les " -"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" -" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " -"2018)] (https://arxiv.org/abs/1812.01097)." 
+"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:691 msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:693 +#, fuzzy msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -"Une nouvelle API REST a été introduite comme alternative à la pile de " -"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " -"prend en charge que les clients anonymes." 
+"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:695 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:714 -msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +#: ../../source/ref-changelog.md:697 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:703 msgid "" -"The Driver API is still an experimental feature, but 
this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"L'API du pilote est encore une fonction expérimentale, mais cette version" -" introduit quelques améliorations majeures. L'une des principales " -"améliorations est l'introduction d'une base de données SQLite pour " -"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " -"autre amélioration est que les tâches (instructions ou résultats) qui ont" -" été livrées seront désormais supprimées, ce qui améliore " -"considérablement l'efficacité de la mémoire d'un serveur Flower " -"fonctionnant depuis longtemps." 
-#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:707 +#, fuzzy msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"**Répare les problèmes de déversement liés à Ray pendant les " -"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:709 #, fuzzy msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"Lors de l'exécution de longues simulations, `ray` déversait parfois " -"d'énormes quantités de données qui rendaient l'entraînement incapable de " -"continuer. ce problème est maintenant corrigé ! 
🎉" +"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:711 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:713 +#, fuzzy msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -"TabNet est un cadre puissant et flexible pour former des modèles " -"d'apprentissage automatique sur des données tabulaires. Nous avons " -"maintenant un exemple fédéré utilisant Flower : [quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)." 
+"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:726 +#: ../../source/ref-changelog.md:715 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -"**Ajouter un nouveau guide pratique pour le suivi des simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:728 +#: ../../source/ref-changelog.md:717 +#, fuzzy msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -"Nous avons maintenant un guide de documentation pour aider les " -"utilisateurs à surveiller leurs performances pendant les simulations." 
+"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:719 +#, fuzzy msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"**Ajouter des mesures de formation à** `History` **objet pendant les " -"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:721 +#, fuzzy msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" +msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" + +#: ../../source/ref-changelog.md:723 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " -"les mesures d'entraînement, mais les versions précédentes " -"n'enregistraient pas les résultats dans l'objet `History`. c'est " -"désormais le cas !" 
-#: ../../source/ref-changelog.md:734 +#: ../../source/ref-changelog.md:725 +#, fuzzy msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " 
-"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " 
-"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/ada" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:742 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:727 +msgid "Add gRPC request-response capability to the C++ SDK." +msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:729 +#, fuzzy msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:731 msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." 
msgstr "" -"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " -"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:733 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " -"qui peut être utilisé pour identifier la charge de travail à laquelle une" -" tâche appartient. Elle prend également en charge un nouveau `group_id` " -"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " -"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " -"nœuds clients de décider s'ils veulent traiter une tâche ou non." 
-#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:735 +#, fuzzy msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " -"flotte soit configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:737 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " -"API) peut maintenant configurer l'adresse du serveur de Driver API (via " -"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " -"de son démarrage :" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:739 #, fuzzy msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" -"address \"0.0.0.0:8086\" ``" - -#: ../../source/ref-changelog.md:762 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:741 +#, fuzzy msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:743 +#, fuzzy msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." 
+"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " -"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " -"ici : [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:745 ../../source/ref-changelog.md:747 +#, fuzzy msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " -"dernières versions d'Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:751 #, fuzzy msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
+"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"L'exemple de code Android a reçu une mise à jour substantielle : le " -"projet est compatible avec Flower 1.0 et les versions ultérieures, " -"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " -"est mis à jour pour être compatible avec les outils Android les plus " -"récents." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:772 +#: ../../source/ref-changelog.md:753 +#, fuzzy msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:755 +#, fuzzy msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." 
+"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"Cette " -"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" est presque identique à " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " -"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " -"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" -" rapport aux modèles globaux." +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:757 +#, fuzzy msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"**Ajouter de nouvelles métriques aux événements de télémétrie** " -"([#1640](https://github.com/adap/flower/pull/1640))" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:778 -msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +#: ../../source/ref-changelog.md:759 +#, fuzzy +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:761 +#, fuzzy +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -"Une structure d'événements mise à jour permet, par exemple, de regrouper " -"des événements au sein d'une même charge de travail." 
+"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" + +#: ../../source/ref-changelog.md:763 +#, fuzzy +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" + +#: ../../source/ref-changelog.md:765 +#, fuzzy +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" + +#: ../../source/ref-changelog.md:767 +#, fuzzy +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:769 +#, fuzzy +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:771 +#, fuzzy +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:773 +#, fuzzy msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur les stratégies " -"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:775 #, fuzzy msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " 
-"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" -" : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" -"-Strategy-PyTorch.ipynb)" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:777 +#, fuzzy msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " 
+"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur la sérialisation " -"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:779 #, fuzzy msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la sérialisation personnalisée : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" -"-Client-and-NumPyClient-PyTorch.ipynb)" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " 
+"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:781 +#, fuzzy msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " 
-"[#1586](https://github.com/adap/flower/pull/1586))" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:783 +msgid "" "**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/ada" 
+"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:785 ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:993 +#: ../../source/ref-changelog.md:1060 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "" +"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " +"pour être énumérées ici." 
-#: ../../source/ref-changelog.md:792 +#: ../../source/ref-changelog.md:789 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 +#: ../../source/ref-changelog.md:791 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" 
+"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " -"autre étape dans notre effort pour faire de la documentation de Flower la" -" meilleure documentation de tout projet. Reste à l'écoute et comme " -"toujours, n'hésite pas à nous faire part de tes commentaires !" -#: ../../source/ref-changelog.md:800 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" +#: ../../source/ref-changelog.md:793 +#, fuzzy +msgid "" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" +msgstr "" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:806 +#: ../../source/ref-changelog.md:795 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " -"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:797 +#, fuzzy +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.4.0 (2023-04-21)" + +#: ../../source/ref-changelog.md:803 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:812 +#: ../../source/ref-changelog.md:807 +#, fuzzy msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"Au cours des prochaines semaines, nous publierons un certain nombre de " -"nouvelles implémentations de référence utiles en particulier pour les " -"nouveaux venus en FL. Elles revisiteront généralement des articles bien " -"connus de la littérature, et seront adaptées à l'intégration dans votre " -"propre application ou à l'expérimentation, afin d'approfondir votre " -"connaissance de FL en général. La publication d'aujourd'hui est la " -"première de cette série. 
[Lire la " -"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" -"cnn/)" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:809 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"**Améliorer la prise en charge des GPU dans les simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:811 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " -"jour pour améliorer la prise en charge des GPU. 
La mise à jour inclut " -"certaines des leçons durement apprises lors de la mise à l'échelle des " -"simulations dans des environnements de grappes de GPU. De nouveaux " -"paramètres par défaut rendent l'exécution des simulations basées sur les " -"GPU beaucoup plus robuste." -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:813 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " 
+"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " -"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:815 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " -"toujours été faciles à utiliser sur les instances GPU. Nous les avons " -"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! 
" -"Découvre les carnets mis à jour ici :" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:817 #, fuzzy msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"[Une introduction à l'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:823 -#, fuzzy +#: ../../source/ref-changelog.md:819 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -"[Stratégies d'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:821 #, fuzzy msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" + +#: ../../source/ref-changelog.md:823 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
msgstr "" -"[Construire une stratégie] " -"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" -"PyTorch.html)" #: ../../source/ref-changelog.md:825 #, fuzzy msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" -"and-NumPyClient-PyTorch.html)" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" #: ../../source/ref-changelog.md:827 msgid "" 
-"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" #: ../../source/ref-changelog.md:829 -msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." -msgstr "" -"À la suite d'une [demande de commentaires] " -"(https://github.com/adap/flower/issues/1534) de la part de la communauté," -" le projet open-source Flower introduit la collecte optionnelle de " -"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " -"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " -"comment Flower est utilisé et quels sont les défis auxquels les " -"utilisateurs peuvent être confrontés." +#, fuzzy +msgid "**Deprecate Python 3.7**" +msgstr "**Créer le PR**" #: ../../source/ref-changelog.md:831 -#, fuzzy msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." 
msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** Restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " -"suite.](https://flower.ai/docs/telemetry.html)." #: ../../source/ref-changelog.md:833 +#, fuzzy msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" #: ../../source/ref-changelog.md:835 +#, fuzzy msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." 
+"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" -" permettra de créer des applications Federated Learning et Federated " -"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " -"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur " -"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" -" peux commencer à construire ces choses dès maintenant, aussi." +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." #: ../../source/ref-changelog.md:837 +#, fuzzy msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -"L'API du pilote permet également un nouveau mode d'exécution dans lequel " -"le serveur s'exécute indéfiniment. Plusieurs charges de travail " -"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " -"leur exécution indépendamment du serveur. Ceci est particulièrement utile" -" pour les utilisateurs qui souhaitent déployer Flower en production." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" #: ../../source/ref-changelog.md:839 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. 
Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " -"attendons tes commentaires avec impatience !" #: ../../source/ref-changelog.md:841 +#, fuzzy msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -"Remarque : *L'API du pilote est encore expérimentale et est susceptible " -"de changer de manière significative au fil du temps.*" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" #: ../../source/ref-changelog.md:843 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" #: ../../source/ref-changelog.md:845 +#, fuzzy msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " -"fédérée avec Pandas et Flower. 
Tu peux le trouver ici : [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" #: ../../source/ref-changelog.md:847 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" #: ../../source/ref-changelog.md:849 +#, fuzzy msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " -"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " -"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." 
#: ../../source/ref-changelog.md:851 +#, fuzzy msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " -"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" #: ../../source/ref-changelog.md:853 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" -" compatible avec la dernière version de Flower." 
#: ../../source/ref-changelog.md:855 +#, fuzzy msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." 
-#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:857 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:859 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. 
" +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -"L'un des points forts est le nouveau [guide du premier contributeur] " -"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" -" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" - -#: ../../source/ref-changelog.md:869 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:861 +#, fuzzy msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -"Nous aimerions **remercier tout particulièrement** tous les contributeurs" -" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:863 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" -#: ../../source/ref-changelog.md:879 +#: ../../source/ref-changelog.md:865 +#, fuzzy msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:881 -msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." -msgstr "" -"Le premier aperçu (expérimental) des wrappers enfichables de " -"confidentialité différentielle permet de configurer et d'utiliser " -"facilement la confidentialité différentielle (DP). Les wrappers DP " -"enfichables permettent une utilisation agnostique du cadre **et** de la " -"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " -"voir les documents de Flower, un nouvel explicatif va plus loin dans les " -"détails." +#: ../../source/ref-changelog.md:867 +#, fuzzy +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" 
-#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:869 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:871 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " -"clients Flower peuvent être construits pour iOS. L'exemple de code " -"contient à la fois des composants Flower iOS SDK qui peuvent être " -"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " -"sur CoreML." 
-#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:873 +#, fuzzy msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"**Nouvelle stratégie de FedMedian** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" + +#: ../../source/ref-changelog.md:881 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:887 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." 
+"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" #: ../../source/ref-changelog.md:891 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " +"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" #: ../../source/ref-changelog.md:893 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. 
We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -"Toutes les exceptions `Client` qui se produisent dans le VCE sont " -"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" -" `Stratégie` configurée (via l'argument `failures`)." +"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " +"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" et un [exemple de code] " +"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " +"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " +"XGBoost." #: ../../source/ref-changelog.md:895 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -"**Améliorer le moteur du client virtuel** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" #: ../../source/ref-changelog.md:897 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. 
We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " -"dictionnaire `client_resources` a été remplacé par `float` pour permettre" -" l'allocation de fractions de ressources." +"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " +"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " +"disposons désormais d'un SDK swift iOS présent sous " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" qui facilitera grandement le processus de création d'applications. Pour " +"présenter son utilisation, l'[exemple " +"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " +"été mis à jour !" #: ../../source/ref-changelog.md:899 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" +" \"** ([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" #: ../../source/ref-changelog.md:901 +#, fuzzy msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. 
It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -"Le moteur de client virtuel prend désormais en charge les méthodes " -"optionnelles `Client` (et `NumPyClient`)." +"Un nouveau [tutoriel d'entrée de gamme] " +"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " +"documentation explique les bases de l'apprentissage fédéré. Il permet à " +"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" +" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " +"l'apprentissage fédéré !" #: ../../source/ref-changelog.md:903 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -"**Fournir des informations de type aux paquets en utilisant** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" #: ../../source/ref-changelog.md:905 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). 
It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " -"indiquant que le paquet est typé. Cela permet de prendre en charge le " -"typage pour les projets ou les paquets qui utilisent `flwr` en leur " -"permettant d'améliorer leur code à l'aide de vérificateurs de types " -"statiques comme `mypy`." +"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " +"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " +"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " +"qui vise à rendre la convergence plus robuste dans des contextes " +"hétérogènes." #: ../../source/ref-changelog.md:907 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" #: ../../source/ref-changelog.md:909 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " -"mis à jour pour fonctionner avec la dernière version de Flower." 
+"Cette nouvelle ligne de base reproduit une expérience évaluant les " +"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" +" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " +"2018)] (https://arxiv.org/abs/1812.01097)." #: ../../source/ref-changelog.md:911 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"**Documentation mise à jour** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " 
-"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" #: ../../source/ref-changelog.md:913 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -"Il y a eu tellement de mises à jour de la documentation que cela n'a même" -" pas de sens de les énumérer individuellement." +"Une nouvelle API REST a été introduite comme alternative à la pile de " +"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " +"prend en charge que les clients anonymes." 
#: ../../source/ref-changelog.md:915 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." #: ../../source/ref-changelog.md:917 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"La documentation a été restructurée pour faciliter la navigation. 
Ce " -"n'est que la première étape d'un effort plus important visant à faire de " -"la documentation de Flower la meilleure documentation de tous les projets" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" #: ../../source/ref-changelog.md:919 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -"**Ouvrir dans le bouton Colab** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"L'API du pilote est encore une fonction expérimentale, mais cette version" +" introduit quelques améliorations majeures. L'une des principales " +"améliorations est l'introduction d'une base de données SQLite pour " +"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " +"autre amélioration est que les tâches (instructions ou résultats) qui ont" +" été livrées seront désormais supprimées, ce qui améliore " +"considérablement l'efficacité de la mémoire d'un serveur Flower " +"fonctionnant depuis longtemps." 
#: ../../source/ref-changelog.md:921 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " -"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " -"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " -"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " -"il te suffit d'un simple clic." +"**Répare les problèmes de déversement liés à Ray pendant les " +"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" #: ../../source/ref-changelog.md:923 +#, fuzzy msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"Lors de l'exécution de longues simulations, `ray` déversait parfois " +"d'énormes quantités de données qui rendaient l'entraînement incapable de " +"continuer. 
ce problème est maintenant corrigé ! 🎉" #: ../../source/ref-changelog.md:925 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" -" parties couvrant les stratégies personnalisées (encore WIP) et la " -"distinction entre `Client` et `NumPyClient`. Les parties un et deux " -"existantes ont également été améliorées (beaucoup de petits changements " -"et de corrections)." - -#: ../../source/ref-changelog.md:931 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" +"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:933 -msgid "Highlights" -msgstr "Points forts" - -#: ../../source/ref-changelog.md:935 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "Moteur de client virtuel stable** (accessible via `start_simulation`)" - -#: ../../source/ref-changelog.md:936 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" - -#: ../../source/ref-changelog.md:937 -msgid "Configurable `get_parameters`" -msgstr "`get_parameters` configurable" - -#: ../../source/ref-changelog.md:938 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" -msgstr "" -"Des tonnes de petits nettoyages d'API résultant en une expérience plus " -"cohérente pour les développeurs" - -#: ../../source/ref-changelog.md:942 +#: ../../source/ref-changelog.md:927 msgid "" -"We would like to give our **special thanks** to all the contributors 
who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -"Nous tenons à remercier **particulièrement** tous les contributeurs qui " -"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors)) :" +"TabNet est un cadre puissant et flexible pour former des modèles " +"d'apprentissage automatique sur des données tabulaires. Nous avons " +"maintenant un exemple fédéré utilisant Flower : [quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)." -#: ../../source/ref-changelog.md:944 +#: ../../source/ref-changelog.md:929 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), 
[@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), 
[@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:948 +#: ../../source/ref-changelog.md:931 msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -"**Tous les arguments doivent être passés comme des arguments de mot-clé**" -" ([#1338](https://github.com/adap/flower/pull/1338))" +"Nous avons maintenant un guide de documentation pour aider les " +"utilisateurs à surveiller leurs performances pendant les simulations." -#: ../../source/ref-changelog.md:950 -#, fuzzy +#: ../../source/ref-changelog.md:933 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -"Le code qui utilise des arguments positionnels (par exemple, " -"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" -"clé pour chaque argument positionnel (par exemple, " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." 
+"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:952 +#: ../../source/ref-changelog.md:935 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -"**Introduire l'objet de configuration** `ServerConfig` **dans** " -"`start_server` **et** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " +"les mesures d'entraînement, mais les versions précédentes " +"n'enregistraient pas les résultats dans l'objet `History`. c'est " +"désormais le cas !" -#: ../../source/ref-changelog.md:954 +#: ../../source/ref-changelog.md:937 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " 
+"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " -"attendent maintenant un objet de configuration de type " -"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " -"le dict de configuration précédent, mais il rend l'écriture de code " -"sécurisé plus facile et les valeurs des paramètres par défaut plus " -"transparentes." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/ada" -#: ../../source/ref-changelog.md:956 -msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "" -"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " -"([#1334](https://github.com/adap/flower/pull/1334))" +#: ../../source/ref-changelog.md:945 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:951 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"Les paramètres de stratégie intégrés suivants ont été renommés pour " -"améliorer la lisibilité et la cohérence avec d'autres API :" - -#: ../../source/ref-changelog.md:960 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:961 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:962 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:955 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -"**Mettre à jour les arguments par défaut des stratégies intégrées** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " +"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:957 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. 
It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" -" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " -"les clients actuellement disponibles pour l'entraînement et l'évaluation." -" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " -"peuvent retrouver le comportement antérieur en initialisant la stratégie " -"de la manière suivante :" - -#: ../../source/ref-changelog.md:968 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " +"qui peut être utilisé pour identifier la charge de travail à laquelle une" +" tâche appartient. Elle prend également en charge un nouveau `group_id` " +"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " +"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " +"nœuds clients de décider s'ils veulent traiter une tâche ou non." 
-#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:959 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -"**Ajouter** `server_round` **à** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " +"flotte soit configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:961 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre." 
+"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " +"API) peut maintenant configurer l'adresse du serveur de Driver API (via " +"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " +"de son démarrage :" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:963 +#, fuzzy msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" -" ([#1334](https://github.com/adap/flower/pull/1334))" +"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" +"address \"0.0.0.0:8086\" ``" -#: ../../source/ref-changelog.md:976 -msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." -msgstr "" -"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " -"maintenant trois paramètres : (1) le cycle actuel " -"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" -" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" -" (`config`)." +#: ../../source/ref-changelog.md:965 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." 
-#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:967 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:969 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " -"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " -"`aggregate_evaluate`) reçoivent le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" -" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " -"renommé de `rnd` à `server_round`." +"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " +"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " +"ici : [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)." 
-#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:971 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" - -#: ../../source/ref-changelog.md:984 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." +"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " +"dernières versions d'Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:973 +#, fuzzy msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"L'exemple de code Android a reçu une mise à jour substantielle : le " +"projet est compatible avec Flower 1.0 et les versions ultérieures, " +"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " +"est mis à jour pour être compatible avec les outils Android les plus " +"récents." -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:975 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." 
+"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " -"`FedFSv0`, `FedFSv1`)." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:990 +#: ../../source/ref-changelog.md:977 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Cette " +"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" est presque identique à " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " +"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " +"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" +" rapport aux modèles globaux." -#: ../../source/ref-changelog.md:992 +#: ../../source/ref-changelog.md:979 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." 
+"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -"`flwr.common.Weights` a été renommé en `flwr.common.NDArys` pour mieux " -"rendre compte de la nature de ce type." +"**Ajouter de nouvelles métriques aux événements de télémétrie** " +"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:981 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -"**Supprimez l'ancien** `force_final_distributed_eval` **de** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Une structure d'événements mise à jour permet, par exemple, de regrouper " +"des événements au sein d'une même charge de travail." -#: ../../source/ref-changelog.md:996 +#: ../../source/ref-changelog.md:983 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " -"été un artefact historique, dans cette version il a finalement disparu " -"pour de bon." 
+"**Ajouter une nouvelle section de tutoriel sur les stratégies " +"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/ref-changelog.md:998 +#: ../../source/ref-changelog.md:985 +#, fuzzy msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" +" : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" +"-Strategy-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:1000 +#: ../../source/ref-changelog.md:987 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -"La méthode `get_parameters` accepte maintenant un dictionnaire de " -"configuration, tout comme `get_properties`, `fit`, et `evaluate`." 
+"**Ajouter une nouvelle section de tutoriel sur la sérialisation " +"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/ref-changelog.md:1002 +#: ../../source/ref-changelog.md:989 +#, fuzzy msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" -" `config` **paramètre** " -"([#1281](https://github.com/adap/flower/pull/1281))" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la sérialisation personnalisée : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" +"-Client-and-NumPyClient-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:1004 +#: ../../source/ref-changelog.md:991 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." 
+"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -"La fonction `start_simulation` accepte maintenant un dictionnaire de " -"configuration `config` au lieu de l'entier `num_rounds`. 
Cela améliore la" -" cohérence entre `start_simulation` et `start_server` et facilite la " -"transition entre les deux." +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/ada" -#: ../../source/ref-changelog.md:1008 +#: ../../source/ref-changelog.md:995 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"**Mise à jour de la documentation** " 
+"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:1010 +#: ../../source/ref-changelog.md:997 ../../source/ref-changelog.md:1064 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -"La version précédente de Flower a introduit la prise en charge " -"expérimentale de Python 3.10, cette version déclare la prise en charge de" -" Python 3.10 comme stable." +"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " +"autre étape dans notre effort pour faire de la documentation de Flower la" +" meilleure documentation de tout projet. Reste à l'écoute et comme " +"toujours, n'hésite pas à nous faire part de tes commentaires !" + +#: ../../source/ref-changelog.md:1003 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/ref-changelog.md:1012 +#: ../../source/ref-changelog.md:1009 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
+" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " -"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " +"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:1014 +#: ../../source/ref-changelog.md:1013 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " -"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " -"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " -"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " -"l'évaluation centralisée !" +"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:1016 +#: ../../source/ref-changelog.md:1015 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. 
Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"Au cours des prochaines semaines, nous publierons un certain nombre de " +"nouvelles implémentations de référence utiles en particulier pour les " +"nouveaux venus en FL. Elles revisiteront généralement des articles bien " +"connus de la littérature, et seront adaptées à l'intégration dans votre " +"propre application ou à l'expérimentation, afin d'approfondir votre " +"connaissance de FL en général. La publication d'aujourd'hui est la " +"première de cette série. [Lire la " +"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" +"cnn/)" -#: ../../source/ref-changelog.md:1018 +#: ../../source/ref-changelog.md:1017 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -"Comme pour `start_server`, `start_simulation` accepte maintenant une " -"instance complète de `Server`. Cela permet aux utilisateurs de " -"personnaliser fortement l'exécution des expériences et ouvre la porte à " -"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " -"virtuel." 
+"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:1020 +#: ../../source/ref-changelog.md:1019 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " +"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " +"certaines des leçons durement apprises lors de la mise à l'échelle des " +"simulations dans des environnements de grappes de GPU. De nouveaux " +"paramètres par défaut rendent l'exécution des simulations basées sur les " +"GPU beaucoup plus robuste." 
-#: ../../source/ref-changelog.md:1022 +#: ../../source/ref-changelog.md:1021 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -"De nombreux exemples de code ont reçu de petites ou même de grandes mises" -" à jour de maintenance" +"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " +"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/ref-changelog.md:1024 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" +#: ../../source/ref-changelog.md:1023 +msgid "" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" +msgstr "" +"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " +"toujours été faciles à utiliser sur les instances GPU. Nous les avons " +"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! 
" +"Découvre les carnets mis à jour ici :" #: ../../source/ref-changelog.md:1025 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" +#, fuzzy +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" +"[Une introduction à l'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" #: ../../source/ref-changelog.md:1026 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch` (démarrage rapide)" +#, fuzzy +msgid "" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +msgstr "" +"[Stratégies d'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" #: ../../source/ref-changelog.md:1027 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" +#, fuzzy +msgid "" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" +msgstr "" +"[Construire une stratégie] " +"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" +"PyTorch.html)" #: ../../source/ref-changelog.md:1028 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" - -#: ../../source/ref-changelog.md:1029 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow` (en anglais)" - -#: ../../source/ref-changelog.md:1031 +#, fuzzy msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -"**Supprime l'exemple de simulation obsolète** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" +"and-NumPyClient-PyTorch.html)" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:1030 msgid "" -"Removes the 
obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -"Supprime l'exemple obsolète `simulation` et renomme " -"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" -" au nom de `simulation_pytorch`" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:1032 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." 
msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"À la suite d'une [demande de commentaires] " +"(https://github.com/adap/flower/issues/1534) de la part de la communauté," +" le projet open-source Flower introduit la collecte optionnelle de " +"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " +"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " +"comment Flower est utilisé et quels sont les défis auxquels les " +"utilisateurs peuvent être confrontés." -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:1034 +#, fuzzy msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." 
msgstr "" -"Une mise à jour substantielle de la documentation corrige plusieurs " -"petits problèmes de rendu, rend les titres plus succincts pour améliorer " -"la navigation, supprime une bibliothèque obsolète, met à jour les " -"dépendances de la documentation, inclut le module `flwr.common` dans la " -"référence de l'API, inclut le support de la documentation basée sur le " -"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " -"nombre de détails plus petits !" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** Restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " +"suite.](https://flower.ai/docs/telemetry.html)." -#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 -#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 -msgid "**Minor updates**" -msgstr "**Mises à jour mineures**" +#: ../../source/ref-changelog.md:1036 +msgid "" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" +msgstr "" +"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:1038 
msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " -"([#1266](https://github.com/adap/flower/pull/1266))" +"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" +" permettra de créer des applications Federated Learning et Federated " +"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " +"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur " +"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" +" peux commencer à construire ces choses dès maintenant, aussi." -#: ../../source/ref-changelog.md:1042 +#: ../../source/ref-changelog.md:1040 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -"Ajouter une connexion gRPC sécurisée à l'exemple de code " -"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" +"L'API du pilote permet également un nouveau mode d'exécution dans lequel " +"le serveur s'exécute indéfiniment. Plusieurs charges de travail " +"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " +"leur exécution indépendamment du serveur. 
Ceci est particulièrement utile" +" pour les utilisateurs qui souhaitent déployer Flower en production." -#: ../../source/ref-changelog.md:1043 +#: ../../source/ref-changelog.md:1042 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " +"attendons tes commentaires avec impatience !" #: ../../source/ref-changelog.md:1044 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -"Renomme les messages ProtoBuf pour améliorer la cohérence " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Remarque : *L'API du pilote est encore expérimentale et est susceptible " +"de changer de manière significative au fil du temps.*" #: ../../source/ref-changelog.md:1046 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" +msgstr "" +"**Ajouter un nouvel exemple de 
Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" + +#: ../../source/ref-changelog.md:1048 +msgid "" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." +msgstr "" +"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " +"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." #: ../../source/ref-changelog.md:1050 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" #: ../../source/ref-changelog.md:1052 -#, fuzzy msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." 
+"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -"La première version préliminaire de Flower Baselines est arrivée ! Nous " -"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " -"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html). Avec cette première version préliminaire, nous invitons " -"également la communauté à [contribuer à leurs propres lignes de " -"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." +"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " +"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " +"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." #: ../../source/ref-changelog.md:1054 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -"**SDK client C++ (aperçu) et exemple de code** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " +"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" #: ../../source/ref-changelog.md:1056 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " -"code de démarrage rapide qui démontre un client C++ simple utilisant le " -"SDK." 
+"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" +" compatible avec la dernière version de Flower." #: ../../source/ref-changelog.md:1058 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" -msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" - -#: ../../source/ref-changelog.md:1060 -msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -"Python 3.10 est la dernière version stable de Python et Python 3.11 " -"devrait sortir en octobre. Cette version de Flower ajoute une prise en " -"charge expérimentale pour les deux versions de Python." 
+"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" #: ../../source/ref-changelog.md:1062 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -"**Agréger des mesures personnalisées grâce à des fonctions fournies par " -"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " 
+"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:1064 +#: ../../source/ref-changelog.md:1066 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -"Les stratégies intégrées prennent en charge deux nouveaux arguments, " -"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " -"permettent de passer des fonctions d'agrégation de métriques " -"personnalisées." +"L'un des points forts est le nouveau [guide du premier contributeur]" +"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" +" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:1072 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" + +#: ../../source/ref-changelog.md:1076 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -"**Temps d'attente configurable par l'utilisateur** " -"([#1162](https://github.com/adap/flower/pull/1162))" - -#: ../../source/ref-changelog.md:1068 -msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. 
If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." -msgstr "" -"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " -"valeur `float` en secondes), le serveur attendra *au moins* " -"`round_timeout` secondes avant de fermer la connexion." - -#: ../../source/ref-changelog.md:1070 -msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" -msgstr "" -"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " -"l'évaluation centralisée dans toutes les stratégies intégrées** " -"([#1091](https://github.com/adap/flower/pull/1091))" - -#: ../../source/ref-changelog.md:1072 -msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." -msgstr "" -"Les stratégies intégrées peuvent maintenant effectuer une évaluation " -"fédérée (c'est-à-dire côté client) et une évaluation centralisée " -"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " -"être désactivée en réglant `fraction_eval` sur `0.0`." 
- -#: ../../source/ref-changelog.md:1074 -msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" -msgstr "" -"**Deux nouveaux tutoriels Jupyter Notebook** " -"([#1141](https://github.com/adap/flower/pull/1141))" - -#: ../../source/ref-changelog.md:1076 -msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" -msgstr "" -"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " -"expliquent les fonctionnalités de base et intermédiaires de Flower :" +"Nous aimerions **remercier tout particulièrement** tous les contributeurs" +" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" #: ../../source/ref-changelog.md:1078 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -msgstr "" -"*Introduction à l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" - -#: ../../source/ref-changelog.md:1080 -msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" #: ../../source/ref-changelog.md:1082 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" -" ([#1076](https://github.com/adap/flower/pull/1076))" +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" #: ../../source/ref-changelog.md:1084 -#, fuzzy msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " -"momentum du serveur [Hsu et al., 2019]." +"Le premier aperçu (expérimental) des wrappers enfichables de " +"confidentialité différentielle permet de configurer et d'utiliser " +"facilement la confidentialité différentielle (DP). Les wrappers DP " +"enfichables permettent une utilisation agnostique du cadre **et** de la " +"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " +"voir les documents de Flower, un nouvel explicatif va plus loin dans les " +"détails." 
#: ../../source/ref-changelog.md:1086 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" #: ../../source/ref-changelog.md:1088 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " -"fleur avancés avec PyTorch." +"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " +"clients Flower peuvent être construits pour iOS. L'exemple de code " +"contient à la fois des composants Flower iOS SDK qui peuvent être " +"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " +"sur CoreML." #: ../../source/ref-changelog.md:1090 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Nouvelle stratégie de FedMedian** " +"([#1461](https://github.com/adap/flower/pull/1461))" #: ../../source/ref-changelog.md:1092 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
+"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " -"l'apprentissage fédéré avec JAX et Flower." +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018]" +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:1096 +#: ../../source/ref-changelog.md:1094 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " -"initialisé dans `start_simulation` " -"([#1177](https://github.com/adap/flower/pull/1177))" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:1097 +#: ../../source/ref-changelog.md:1096 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" +"Toutes les exceptions `Client` qui se produisent dans le VCE sont " +"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" +" `Stratégie` configurée (via l'argument `failures`)." 
#: ../../source/ref-changelog.md:1098 -#, fuzzy msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" -msgstr "" -"Nouvelle documentation pour [mettre en œuvre des " -"stratégies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" - -#: ../../source/ref-changelog.md:1099 -msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -"Nouveau thème de documentation adapté aux mobiles " -"([#1174](https://github.com/adap/flower/pull/1174))" +"**Améliorer le moteur du client virtuel** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" #: ../../source/ref-changelog.md:1100 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " -"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " +"dictionnaire `client_resources` a été remplacé par `float` pour permettre" +" l'allocation de fractions de ressources." 
-#: ../../source/ref-changelog.md:1104 +#: ../../source/ref-changelog.md:1102 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -"**Supprime la prise en charge obsolète de Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" -#: ../../source/ref-changelog.md:1105 +#: ../../source/ref-changelog.md:1104 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"Le moteur de client virtuel prend désormais en charge les méthodes " +"optionnelles `Client` (et `NumPyClient`)." #: ../../source/ref-changelog.md:1106 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" -msgstr "" -"**Supprimer les installations supplémentaires no-op dépréciées** " -"([#973](https://github.com/adap/flower/pull/973))" - -#: ../../source/ref-changelog.md:1107 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" -" ([#869](https://github.com/adap/flower/pull/869))" +"**Fournir des informations de type aux paquets en utilisant** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" #: ../../source/ref-changelog.md:1108 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" 
-msgstr "" -"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" - -#: ../../source/ref-changelog.md:1109 -msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -"**Supprime la stratégie DefaultStrategy qui est obsolète** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " +"indiquant que le paquet est typé. Cela permet de prendre en charge le " +"typage pour les projets ou les paquets qui utilisent `flwr` en leur " +"permettant d'améliorer leur code à l'aide de vérificateurs de types " +"statiques comme `mypy`." #: ../../source/ref-changelog.md:1110 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"**Supprimer la prise en charge obsolète de la valeur de retour de la " -"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:1111 +#: ../../source/ref-changelog.md:1112 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
msgstr "" -"**Supprime la prise en charge obsolète du passage des paramètres initiaux" -" en tant que ndarrays NumPy** " -"([#1142](https://github.com/adap/flower/pull/1142))" - -#: ../../source/ref-changelog.md:1113 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/ref-changelog.md:1117 +#: ../../source/ref-changelog.md:1114 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -"**Amélioration de la compatibilité du moteur de client virtuel avec " -"Jupyter Notebook / Google Colab** " -"([#866](https://github.com/adap/flower/pull/866), " 
-"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Documentation mise à jour** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/ref-changelog.md:1119 +#: ../../source/ref-changelog.md:1116 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -"Les simulations (utilisant le moteur de client virtuel via " -"`start_simulation`) fonctionnent maintenant plus facilement sur les " -"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " -"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." +"Il y a eu tellement de mises à jour de la documentation que cela n'a même" +" pas de sens de les énumérer individuellement." 
-#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:1118 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -"**Nouvel exemple de code Jupyter Notebook** " -"([#833](https://github.com/adap/flower/pull/833))" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:1123 +#: ../../source/ref-changelog.md:1120 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -"Un nouvel exemple de code (`quickstart_simulation`) démontre des " -"simulations de Flower en utilisant le moteur de client virtuel via " -"Jupyter Notebook (y compris Google Colab)." +"La documentation a été restructurée pour faciliter la navigation. 
Ce " +"n'est que la première étape d'un effort plus important visant à faire de " +"la documentation de Flower la meilleure documentation de tous les projets" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:1122 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -"**Propriétés du client (aperçu des fonctionnalités)** " -"([#795](https://github.com/adap/flower/pull/795))" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:1127 +#: ../../source/ref-changelog.md:1124 msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"Les clients peuvent implémenter une nouvelle méthode `get_properties` " -"pour permettre aux stratégies côté serveur d'interroger les propriétés du" -" client." +"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " +"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " +"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " +"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " +"il te suffit d'un simple clic." 
-#: ../../source/ref-changelog.md:1129 +#: ../../source/ref-changelog.md:1126 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -"**Support expérimental d'Android avec TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:1128 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" -" la fois agnostique au niveau du client et du cadre de travail. On peut " -"intégrer des plates-formes client arbitraires et avec cette version, " -"l'utilisation de Flower sur Android est devenue beaucoup plus facile." 
+"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" +" parties couvrant les stratégies personnalisées (encore WIP) et la " +"distinction entre `Client` et `NumPyClient`. Les parties un et deux " +"existantes ont également été améliorées (beaucoup de petits changements " +"et de corrections)." -#: ../../source/ref-changelog.md:1133 -msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." -msgstr "" -"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " -"`FedAvgAndroid`. Le client Android et `FedAvgAndroid` sont encore " -"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " -"part entière et une implémentation unifiée de `FedAvg` intégrant la " -"nouvelle fonctionnalité de `FedAvgAndroid`." +#: ../../source/ref-changelog.md:1134 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:1135 -msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" -msgstr "" -"**Rendre le temps de garde gRPC configurable par l'utilisateur et " -"diminuer le temps de garde par défaut** " -"([#1069](https://github.com/adap/flower/pull/1069))" +#: ../../source/ref-changelog.md:1136 +msgid "Highlights" +msgstr "Points forts" -#: ../../source/ref-changelog.md:1137 -msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." 
-msgstr "" -"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " -"compatibilité de Flower avec davantage d'environnements cloud (par " -"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " -"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " -"spécifiques." +#: ../../source/ref-changelog.md:1138 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "**Moteur de client virtuel stable** (accessible via `start_simulation`)" #: ../../source/ref-changelog.md:1139 -msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" -msgstr "" -"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " -"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" -#: ../../source/ref-changelog.md:1141 -msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." -msgstr "" -"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " -"différentiellement privé avec Opacus, PyTorch et Flower." 
+#: ../../source/ref-changelog.md:1140 +msgid "Configurable `get_parameters`" +msgstr "`get_parameters` configurable" -#: ../../source/ref-changelog.md:1143 +#: ../../source/ref-changelog.md:1141 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -"**Nouvel exemple de code pour les Transformers à visage embrassant** " -"([#863](https://github.com/adap/flower/pull/863))" +"Des tonnes de petits nettoyages d'API résultant en une expérience plus " +"cohérente pour les développeurs" #: ../../source/ref-changelog.md:1145 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"Un nouvel exemple de code (`quickstart_huggingface`) démontre " -"l'utilisation des transformateurs Hugging Face avec Flower." 
+"Nous tenons à remercier **particulièrement** tous les contributeurs qui " +"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors)) :" #: ../../source/ref-changelog.md:1147 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" - -#: ../../source/ref-changelog.md:1149 -msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." -msgstr "" -"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" -" MLCube avec Flower." 
- -#: ../../source/ref-changelog.md:1151 -msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" - -#: ../../source/ref-changelog.md:1153 +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " 
+"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." +msgstr "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" + +#: ../../source/ref-changelog.md:1151 msgid "" -"SSL enables secure encrypted connections between clients and servers. 
" -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -"SSL permet d'établir des connexions cryptées et sécurisées entre les " -"clients et les serveurs. Cette version met en open-source " -"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " -"communication cryptés accessibles à tous les utilisateurs de Flower." +"**Tous les arguments doivent être passés comme des arguments de mot-clé**" +" ([#1338](https://github.com/adap/flower/pull/1338))" + +#: ../../source/ref-changelog.md:1153 +#, fuzzy +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" +"Le code qui utilise des arguments positionnels (par exemple, " +"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" +"clé pour chaque argument positionnel (par exemple, " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." 
#: ../../source/ref-changelog.md:1155 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Introduire l'objet de configuration** `ServerConfig` **dans** " +"`start_server` **et** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" #: ../../source/ref-changelog.md:1157 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." +"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " +"attendent maintenant un objet de configuration de type " +"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " +"le dict de configuration précédent, mais il rend l'écriture de code " +"sécurisé plus facile et les valeurs des paramètres par défaut plus " +"transparentes." 
#: ../../source/ref-changelog.md:1159 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " +"([#1334](https://github.com/adap/flower/pull/1334))" #: ../../source/ref-changelog.md:1161 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -"`start_simulation` peut maintenant être appelé avec une liste " -"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " -"identifiants seront passés à `client_fn` chaque fois qu'un client doit " -"être initialisé, ce qui peut faciliter le chargement de partitions de " -"données qui ne sont pas accessibles par des identifiants `int`." 
+"Les paramètres de stratégie intégrés suivants ont été renommés pour " +"améliorer la lisibilité et la cohérence avec d'autres API :" -#: ../../source/ref-changelog.md:1165 -msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" -msgstr "" -"Mettre à jour le calcul de `num_examples` dans les exemples de code " -"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" +#: ../../source/ref-changelog.md:1163 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" -#: ../../source/ref-changelog.md:1166 -msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" -msgstr "" -"Exposer la version de Flower à travers `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +#: ../../source/ref-changelog.md:1164 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/ref-changelog.md:1167 -msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" -msgstr "" -"`start_server` dans `app.py` renvoie maintenant un objet `History` " -"contenant les métriques de l'entraînement " -"([#974](https://github.com/adap/flower/pull/974))" +#: ../../source/ref-changelog.md:1165 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/ref-changelog.md:1168 +#: ../../source/ref-changelog.md:1167 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Mettre à jour les arguments 
par défaut des stratégies intégrées** "
+"([#1278](https://github.com/adap/flower/pull/1278))"

#: ../../source/ref-changelog.md:1169
msgid ""
-"Increase sleep time after server start to three seconds in all code "
-"examples ([#1086](https://github.com/adap/flower/pull/1086))"
-msgstr ""
-"Augmente le temps de sommeil après le démarrage du serveur à trois "
-"secondes dans tous les exemples de code "
-"([#1086](https://github.com/adap/flower/pull/1086))"
-
-#: ../../source/ref-changelog.md:1170
-msgid ""
-"Added a new FAQ section to the documentation "
-"([#948](https://github.com/adap/flower/pull/948))"
+"All built-in strategies now use `fraction_fit=1.0` and "
+"`fraction_evaluate=1.0`, which means they select *all* currently "
+"available clients for training and evaluation. Projects that relied on "
+"the previous default values can get the previous behaviour by "
+"initializing the strategy in the following way:"
msgstr ""
-"Ajout d'une nouvelle section FAQ à la documentation "
-"([#948](https://github.com/adap/flower/pull/948))"
+"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et"
+" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* "
+"les clients actuellement disponibles pour l'entraînement et l'évaluation."
+" Les projets qui s'appuyaient sur les valeurs par défaut précédentes "
+"peuvent retrouver le comportement antérieur en initialisant la stratégie "
+"de la manière suivante :"

#: ../../source/ref-changelog.md:1171
+msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`"
+msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`"
+
+#: ../../source/ref-changelog.md:1173
msgid ""
-"And many more under-the-hood changes, library updates, documentation "
+"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Et bien d'autres changements sous le capot, des mises à jour de la " -"bibliothèque, des modifications de la documentation et des améliorations " -"de l'outillage !" +"**Ajouter** `server_round` **à** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" #: ../../source/ref-changelog.md:1175 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " -"release build** ([#869](https://github.com/adap/flower/pull/869))" +"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre." #: ../../source/ref-changelog.md:1177 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " -"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " -"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " -"supprimés dans une prochaine version." 
+"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" +" ([#1334](https://github.com/adap/flower/pull/1334))" #: ../../source/ref-changelog.md:1179 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." +msgstr "" +"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " +"maintenant trois paramètres : (1) le cycle actuel " +"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" +" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" +" (`config`)." + +#: ../../source/ref-changelog.md:1181 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" +msgstr "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" #: ../../source/ref-changelog.md:1183 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." 
msgstr "" -"**Moteur expérimental de client virtuel** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " +"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " +"`aggregate_evaluate`) reçoivent le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" +" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " +"renommé de `rnd` à `server_round`." #: ../../source/ref-changelog.md:1185 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -"L'un des objectifs de Flower est de permettre la recherche à grande " -"échelle. Cette version donne un premier aperçu (expérimental) d'une " -"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " -"client virtuel. Les clients virtuels permettent des simulations qui " -"s'étendent à un (très) grand nombre de clients sur une seule machine ou " -"une grappe de calcul. La façon la plus simple de tester la nouvelle " -"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" -" `quickstart_simulation` et `simulation_pytorch`." 
+"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" #: ../../source/ref-changelog.md:1187 -msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." -msgstr "" -"La fonction est encore expérimentale, il n'y a donc aucune garantie de " -"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " -"prime time et s'accompagne de quelques mises en garde connues. Cependant," -" les personnes curieuses sont encouragées à l'essayer et à faire part de " -"leurs réflexions." +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." #: ../../source/ref-changelog.md:1189 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" #: ../../source/ref-changelog.md:1191 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" +"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " +"`FedFSv0`, `FedFSv1`)." 
-#: ../../source/ref-changelog.md:1192
+#: ../../source/ref-changelog.md:1193
msgid ""
-"FedAdam - Federated learning strategy using Adam on server-side. "
-"Implementation based on https://arxiv.org/abs/2003.00295"
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
msgstr ""
-"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. "
-"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295"
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"

-#: ../../source/ref-changelog.md:1194
+#: ../../source/ref-changelog.md:1195
msgid ""
-"**New PyTorch Lightning code example** "
-"([#617](https://github.com/adap/flower/pull/617))"
+"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better "
+"capture what this type is all about."
msgstr ""
-"**Nouvel exemple de code PyTorch Lightning** "
-"([#617](https://github.com/adap/flower/pull/617))"
+"`flwr.common.Weights` a été renommé en `flwr.common.NDArrays` pour mieux "
+"rendre compte de la nature de ce type."
-#: ../../source/ref-changelog.md:1196 +#: ../../source/ref-changelog.md:1197 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"**Nouvel exemple de code d'autocodage variationnel** " -"([#752](https://github.com/adap/flower/pull/752))" +"**Supprimez l'ancien** `force_final_distributed_eval` **de** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:1198 +#: ../../source/ref-changelog.md:1199 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" +"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " +"été un artefact historique, dans cette version il a finalement disparu " +"pour de bon." 
-#: ../../source/ref-changelog.md:1200 +#: ../../source/ref-changelog.md:1201 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:1204 +#: ../../source/ref-changelog.md:1203 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -"Amélioration de l'exemple de code TensorFlow avancé " -"([#769](https://github.com/adap/flower/pull/769))" +"La méthode `get_parameters` accepte maintenant un dictionnaire de " +"configuration, tout comme `get_properties`, `fit`, et `evaluate`." #: ../../source/ref-changelog.md:1205 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"Avertissement lorsque `min_available_clients` est mal configuré " -"([#830](https://github.com/adap/flower/pull/830))" +"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" +" `config` **paramètre** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:1206 +#: ../../source/ref-changelog.md:1207 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. 
This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" +"La fonction `start_simulation` accepte maintenant un dictionnaire de " +"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" +" cohérence entre `start_simulation` et `start_server` et facilite la " +"transition entre les deux." -#: ../../source/ref-changelog.md:1207 +#: ../../source/ref-changelog.md:1211 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -"Amélioration du message d'erreur dans `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:1208 +#: ../../source/ref-changelog.md:1213 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -"Exemple de code de démarrage rapide PyTorch amélioré " -"([#852](https://github.com/adap/flower/pull/852))" +"La version précédente de Flower a introduit la prise en charge " +"expérimentale de Python 3.10, cette version déclare la prise en charge de" +" Python 3.10 comme stable." 
-#: ../../source/ref-changelog.md:1212 +#: ../../source/ref-changelog.md:1215 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -"**Désactivé l'évaluation finale distribuée** " -"([#800](https://github.com/adap/flower/pull/800))" +"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " +"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:1214 +#: ../../source/ref-changelog.md:1217 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -"Le comportement précédent consistait à effectuer un dernier tour " -"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " -"souvent pas nécessaire (par exemple, lors de l'utilisation de " -"l'évaluation côté serveur). Le comportement précédent peut être activé en" -" passant `force_final_distributed_eval=True` à `start_server`." +"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " +"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " +"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " +"méthode. 
Pas besoin d'implémenter `evaluate` quand on utilise " +"l'évaluation centralisée !" -#: ../../source/ref-changelog.md:1216 +#: ../../source/ref-changelog.md:1219 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:1218 +#: ../../source/ref-changelog.md:1221 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " -"refléter la notation donnée dans l'article original (q-FFL est l'objectif" -" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " -"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " -"des raisons de compatibilité (elle sera supprimée dans une prochaine " -"version)." +"Comme pour `start_server`, `start_simulation` accepte maintenant une " +"instance complète de `Server`. Cela permet aux utilisateurs de " +"personnaliser fortement l'exécution des expériences et ouvre la porte à " +"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " +"virtuel." 
-#: ../../source/ref-changelog.md:1220 +#: ../../source/ref-changelog.md:1223 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:1222 +#: ../../source/ref-changelog.md:1225 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " -"basé sur le moteur expérimental du client virtuel, qui deviendra la " -"nouvelle méthode par défaut pour effectuer la plupart des types de " -"simulations à grande échelle dans Flower. L'exemple existant a été " -"conservé à des fins de référence, mais il pourrait être supprimé à " -"l'avenir." 
+"De nombreux exemples de code ont reçu de petites ou même de grandes mises" +" à jour de maintenance" -#: ../../source/ref-changelog.md:1224 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:1227 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" #: ../../source/ref-changelog.md:1228 -msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" -"**Nouvelles stratégies intégrées** " -"([#549](https://github.com/adap/flower/pull/549))" +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" + +#: ../../source/ref-changelog.md:1229 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch` (démarrage rapide)" #: ../../source/ref-changelog.md:1230 -msgid "(abstract) FedOpt" -msgstr "(résumé) FedOpt" +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:1231 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:1232 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow` (en anglais)" -#: ../../source/ref-changelog.md:1233 +#: ../../source/ref-changelog.md:1234 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -"**Métriques personnalisées pour le serveur et les stratégies** " -"([#717](https://github.com/adap/flower/pull/717))" +"**Supprime l'exemple de simulation obsolète** " +"([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/ref-changelog.md:1235 +#: ../../source/ref-changelog.md:1236 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. 
As of this " -"release, custom metrics replace task-specific metrics on the server." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -"Le serveur Flower est maintenant totalement agnostique, toutes les " -"instances restantes de métriques spécifiques à une tâche (telles que " -"`accuracy`) ont été remplacées par des dictionnaires de métriques " -"personnalisées. Flower 0.15 a introduit la possibilité de passer un " -"dictionnaire contenant des métriques personnalisées du client au serveur." -" À partir de cette version, les métriques personnalisées remplacent les " -"métriques spécifiques à une tâche sur le serveur." +"Supprime l'exemple obsolète `simulation` et renomme " +"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" +" au nom de `simulation_pytorch`" -#: ../../source/ref-changelog.md:1237 -#, fuzzy +#: ../../source/ref-changelog.md:1238 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." 
+"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -"Les dictionnaires de métriques personnalisés sont maintenant utilisés " -"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " -"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " -"permettent aux fonctions d'évaluation passées aux stratégies intégrées " -"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " -"stratégies peuvent même renvoyer des dictionnaires de métriques " -"*agrégées* pour que le serveur puisse en garder la trace." 
+"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:1239 -#, fuzzy +#: ../../source/ref-changelog.md:1240 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -"Les implémentations de Stratey doivent migrer leurs méthodes " -"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " -"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " -"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " -"`return loss, {\"accuracy\" : accuracy}`." 
+"Une mise à jour substantielle de la documentation corrige plusieurs " +"petits problèmes de rendu, rend les titres plus succincts pour améliorer " +"la navigation, supprime une bibliothèque obsolète, met à jour les " +"dépendances de la documentation, inclut le module `flwr.common` dans la " +"référence de l'API, inclut le support de la documentation basée sur le " +"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " +"nombre de détails plus petits !" + +#: ../../source/ref-changelog.md:1242 ../../source/ref-changelog.md:1297 +#: ../../source/ref-changelog.md:1366 ../../source/ref-changelog.md:1405 +msgid "**Minor updates**" +msgstr "**Mises à jour mineures**" -#: ../../source/ref-changelog.md:1241 +#: ../../source/ref-changelog.md:1244 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " -"pris en charge), la compatibilité sera supprimée dans une prochaine " -"version." 
+"Ajoute le numéro du tour aux messages de journal fit et evaluate " +"([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/ref-changelog.md:1243 +#: ../../source/ref-changelog.md:1245 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -"**Avertissements de migration pour les fonctionnalités obsolètes** " -"([#690](https://github.com/adap/flower/pull/690))" +"Ajouter une connexion gRPC sécurisée à l'exemple de code " +"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:1245 +#: ../../source/ref-changelog.md:1246 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -"Les versions antérieures de Flower ont souvent été migrées vers de " -"nouvelles API, tout en maintenant la compatibilité avec les anciennes " -"API. Cette version introduit des messages d'avertissement détaillés si " -"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " -"d'avertissement fournissent souvent des détails sur la façon de migrer " -"vers des API plus récentes, facilitant ainsi la transition d'une version " -"à l'autre."
+"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" #: ../../source/ref-changelog.md:1247 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"Amélioration des docs et des docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"Renomme les messages ProtoBuf pour améliorer la cohérence " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" #: ../../source/ref-changelog.md:1249 -msgid "MXNet example and documentation" -msgstr "Exemple et documentation MXNet" +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" -#: ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1253 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" -" fédération 
([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" #: ../../source/ref-changelog.md:1255 +#, fuzzy msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -"**Serveur agnostique de sérialisation** " -"([#721](https://github.com/adap/flower/pull/721))" +"La première version préliminaire de Flower Baselines est arrivée ! Nous " +"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " +"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html). Avec cette première version préliminaire, nous invitons " +"également la communauté à [contribuer à leurs propres lignes de " +"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." #: ../../source/ref-changelog.md:1257 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -"Le serveur Flower est désormais totalement agnostique en matière de " -"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " -"représente les paramètres sous forme de tableaux NumPy désérialisés) a " -"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " -"Les objets `Parameters` sont totalement agnostiques en matière de " -"sérialisation et représentent les paramètres sous forme de tableaux " -"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " -"d'octets doivent être interprétés (par exemple, pour la " -"sérialisation/désérialisation)." +"**SDK client C++ (aperçu) et exemple de code** " +"([#1111](https://github.com/adap/flower/pull/1111))" #: ../../source/ref-changelog.md:1259 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -"Les stratégies intégrées mettent en œuvre cette approche en gérant en " -"interne la sérialisation et la désérialisation de `Weights`. 
Les " -"implémentations de stratégies personnalisées ou tierces doivent être " -"mises à jour avec les définitions de méthodes de stratégie légèrement " -"modifiées. Les auteurs de stratégies peuvent consulter le PR " -"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " -"stratégies peuvent facilement migrer vers le nouveau format." +"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " +"code de démarrage rapide qui démontre un client C++ simple utilisant le " +"SDK." #: ../../source/ref-changelog.md:1261 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"Déclassé `flwr.server.Server.evaluate`, utiliser " -"`flwr.server.Server.evaluate_round` à la place " -"([#717](https://github.com/adap/flower/pull/717))" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" #: ../../source/ref-changelog.md:1263 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" +"Python 3.10 est la dernière version stable de Python et Python 3.11 " +"devrait sortir en octobre. Cette version de Flower ajoute une prise en " +"charge expérimentale pour les deux versions de Python." 
+ +#: ../../source/ref-changelog.md:1265 msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" +"**Agréger des mesures personnalisées grâce à des fonctions fournies par " +"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" #: ../../source/ref-changelog.md:1267 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -"**Initialisation des paramètres côté serveur** " -"([#658](https://github.com/adap/flower/pull/658))" +"Les métriques personnalisées (par exemple `accuracy`) peuvent désormais " +"être agrégées sans avoir à personnaliser la stratégie. Les stratégies " +"intégrées prennent en charge deux nouveaux arguments, " +"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " +"permettent de passer des fonctions d'agrégation de métriques " +"personnalisées." #: ../../source/ref-changelog.md:1269 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -"Les paramètres du modèle peuvent maintenant être initialisés côté " -"serveur. L'initialisation des paramètres côté serveur fonctionne via une " -"nouvelle méthode `Strategy` appelée `initialize_parameters`." +"**Temps d'attente configurable par l'utilisateur** " +"([#1162](https://github.com/adap/flower/pull/1162))" #: ../../source/ref-changelog.md:1271 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. 
Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -"Les stratégies intégrées prennent en charge un nouvel argument du " -"constructeur appelé `initial_parameters` pour définir les paramètres " -"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " -"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." +"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " +"valeur `float` en secondes), le serveur attendra *au moins* " +"`round_timeout` secondes avant de fermer la connexion." -#: ../../source/ref-changelog.md:1290 +#: ../../source/ref-changelog.md:1273 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " -"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " -"l'un des clients connectés ses paramètres et les utilisera comme " -"paramètres globaux initiaux)." 
+"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " +"l'évaluation centralisée dans toutes les stratégies intégrées** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:1294 +#: ../../source/ref-changelog.md:1275 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " -"`flwr.server.strategy.FedAvg`, qui est équivalent)" - -#: ../../source/ref-changelog.md:1296 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +"Les stratégies intégrées peuvent maintenant effectuer une évaluation " +"fédérée (c'est-à-dire côté client) et une évaluation centralisée " +"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " +"être désactivée en réglant `fraction_eval` sur `0.0`." 
-#: ../../source/ref-changelog.md:1300 +#: ../../source/ref-changelog.md:1277 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " -"retour** ([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Deux nouveaux tutoriels Jupyter Notebook** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:1302 +#: ../../source/ref-changelog.md:1279 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " -"associant les clés `str` aux valeurs des types suivants : `bool`, " -"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " -"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " -"du serveur !" 
+"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " +"expliquent les fonctionnalités de base et intermédiaires de Flower :" -#: ../../source/ref-changelog.md:1304 +#: ../../source/ref-changelog.md:1281 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -"Cette amélioration a également permis de rendre plus cohérents les types " -"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " -"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " -"d'exemples, et un dictionnaire contenant des valeurs arbitraires " -"spécifiques au problème comme la précision." +"*Introduction à l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:1306 +#: ../../source/ref-changelog.md:1283 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -"Au cas où tu te poserais la question : cette fonctionnalité est " -"compatible avec les projets existants, la valeur de retour supplémentaire" -" du dictionnaire est facultative. Le nouveau code doit cependant migrer " -"vers les nouveaux types de retour pour être compatible avec les " -"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " -"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " -"ci-dessous pour plus de détails." +"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:1308 +#: ../../source/ref-changelog.md:1285 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -"*Exemple de code:* note les valeurs de retour du dictionnaire " -"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" +"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" +" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:1323 +#: ../../source/ref-changelog.md:1287 +#, fuzzy msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." 
msgstr "" -"**Généralisé** `config` **argument dans** `Client.fit` **et** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " +"momentum du serveur [Hsu et al., 2019]." -#: ../../source/ref-changelog.md:1325 +#: ../../source/ref-changelog.md:1289 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " -"signifie que les valeurs du dictionnaire devaient être des chaînes. La " -"nouvelle version généralise cela pour permettre les valeurs des types " -"suivants : `bool`, `bytes`, `float`, `int`, `str`." +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:1327 +#: ../../source/ref-changelog.md:1291 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -"Cela signifie que l'on peut maintenant passer des valeurs presque " -"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " -"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " -"du côté client !" +"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " +"fleur avancés avec PyTorch." 
-#: ../../source/ref-changelog.md:1329 +#: ../../source/ref-changelog.md:1293 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" -" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" - -#: ../../source/ref-changelog.md:1346 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:1350 +#: ../../source/ref-changelog.md:1295 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -"Nouvel exemple : PyTorch de centralisé à fédéré " -"([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:1351 -msgid "Improved documentation" -msgstr "Amélioration de la documentation" +"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " +"l'apprentissage fédéré avec JAX et Flower." 
-#: ../../source/ref-changelog.md:1352 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:1299 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -"Nouveau thème de documentation " -"([#551](https://github.com/adap/flower/pull/551))" - -#: ../../source/ref-changelog.md:1353 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " +"initialisé dans `start_simulation` " +"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/ref-changelog.md:1354 +#: ../../source/ref-changelog.md:1300 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -"Mise à jour de la documentation des exemples " -"([#549](https://github.com/adap/flower/pull/549))" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:1355 +#: ../../source/ref-changelog.md:1301 +#, fuzzy msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"Suppression de la documentation obsolète " -"([#548](https://github.com/adap/flower/pull/548))" - -#: ../../source/ref-changelog.md:1357 -msgid "Bugfix:" -msgstr "Correction de bogues :" +"Nouvelle 
documentation pour [mettre en œuvre des " +"stratégies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" -#: ../../source/ref-changelog.md:1359 +#: ../../source/ref-changelog.md:1302 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " -"déconnexion des clients est maintenant gérée dans " -"`flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"Nouveau thème de documentation adapté aux mobiles " +"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:1361 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" - -#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 -msgid "Important changes:" -msgstr "Changements importants :" - -#: ../../source/ref-changelog.md:1365 +#: ../../source/ref-changelog.md:1303 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -"Ajout d'un exemple pour les périphériques embarqués " -"([#507](https://github.com/adap/flower/pull/507))" +"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " +"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/ref-changelog.md:1366 +#: 
../../source/ref-changelog.md:1307 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**Supprime la prise en charge obsolète de Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/ref-changelog.md:1367 +#: ../../source/ref-changelog.md:1308 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -"Déclassement du paquet `flwr_example` et migration des exemples dans le " -"répertoire de premier niveau `examples` " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" - -#: ../../source/ref-changelog.md:1369 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" - -#: ../../source/ref-changelog.md:1371 -msgid "Incompatible changes:" -msgstr "Changements incompatibles :" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:1373 +#: ../../source/ref-changelog.md:1309 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -"Renommé les méthodes de stratégie " -"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" -" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " -"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" -" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " -"quatre méthodes de Stratégie. Pour migrer, renommez les méthodes de " -"`Strategy` suivantes en conséquence :" - -#: ../../source/ref-changelog.md:1374 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" - -#: ../../source/ref-changelog.md:1375 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" - -#: ../../source/ref-changelog.md:1376 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" - -#: ../../source/ref-changelog.md:1377 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:1381 +#: ../../source/ref-changelog.md:1310 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"Déclassé `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " -"`FedAvg` à la place." 
+"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" +" ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:1382 +#: ../../source/ref-changelog.md:1311 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -"Exemples simplifiés et lignes de base " -"([#484](https://github.com/adap/flower/pull/484))." +"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:1383 +#: ../../source/ref-changelog.md:1312 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " -"de stratégie ([#483](https://github.com/adap/flower/pull/483))." +"**Supprime la stratégie DefaultStrategy qui est obsolète** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1313 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." 
+"**Supprimer la prise en charge obsolète de la valeur de retour de la " +"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:1385 +#: ../../source/ref-changelog.md:1314 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"Amélioration des docstrings `Stratégie` " -"([#470](https://github.com/adap/flower/pull/470))." +"**Supprime la prise en charge obsolète du passage des paramètres initiaux" +" en tant que ndarrays NumPy** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-example-projects.rst:2 -#, fuzzy -msgid "Example projects" -msgstr "Exemples de PyTorch" +#: ../../source/ref-changelog.md:1316 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:1320 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " -"montrent comment Flower peut être utilisé pour fédérer différents types " -"de pipelines d'apprentissage automatique existants, qui s'appuient " -"généralement sur des frameworks d'apprentissage automatique populaires " -"tels que `PyTorch `_ ou `TensorFlow " -"`_." 
- -#: ../../source/ref-example-projects.rst:9 -#, fuzzy -msgid "The following examples are available as standalone projects." -msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." - -#: ../../source/ref-example-projects.rst:12 -#, fuzzy -msgid "Quickstart TensorFlow/Keras" -msgstr "Démarrage rapide de TensorFlow" +"**Amélioration de la compatibilité du moteur de client virtuel avec " +"Jupyter Notebook / Google Colab** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:1322 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " -"d'images CIFAR-10 avec MobileNetV2 :" +"Les simulations (utilisant le moteur de client virtuel via " +"`start_simulation`) fonctionnent maintenant plus facilement sur les " +"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " +"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." 
-#: ../../source/ref-example-projects.rst:17 -#, fuzzy +#: ../../source/ref-changelog.md:1324 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +"**Nouvel exemple de code Jupyter Notebook** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-example-projects.rst:19 -#, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:1326 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -"`Quickstart TensorFlow (Tutorial) `_" +"Un nouvel exemple de code (`quickstart_simulation`) démontre des " +"simulations de Flower en utilisant le moteur de client virtuel via " +"Jupyter Notebook (y compris Google Colab)." -#: ../../source/ref-example-projects.rst:20 +#: ../../source/ref-changelog.md:1328 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -"`Quickstart TensorFlow (Blog Post) `_" - -#: ../../source/ref-example-projects.rst:24 -#: ../../source/tutorial-quickstart-pytorch.rst:4 -msgid "Quickstart PyTorch" -msgstr "Démarrage rapide de PyTorch" +"**Propriétés du client (aperçu des fonctionnalités)** " +"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-changelog.md:1330 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." 
msgstr "" -"L'exemple de démarrage rapide PyTorch montre la classification d'images " -"CIFAR-10 avec un simple réseau neuronal convolutif :" +"Les clients peuvent implémenter une nouvelle méthode `get_properties` " +"pour permettre aux stratégies côté serveur d'interroger les propriétés du" +" client." -#: ../../source/ref-example-projects.rst:29 -#, fuzzy +#: ../../source/ref-changelog.md:1332 msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -"`Quickstart PyTorch (Code) " -"`_" +"**Support expérimental d'Android avec TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-example-projects.rst:31 -#, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +#: ../../source/ref-changelog.md:1334 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -"`Quickstart PyTorch (Tutorial) `_" - -#: ../../source/ref-example-projects.rst:34 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch : De la centralisation à la fédération" +"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" +" la fois agnostique au niveau du client et du cadre de travail. On peut " +"intégrer des plates-formes client arbitraires et avec cette version, " +"l'utilisation de Flower sur Android est devenue beaucoup plus facile." -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-changelog.md:1336 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. 
The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" -" l'aide de Flower :" +"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " +"`FedAvgAndroid`. Le client Android et `FedAvgAndroid` sont encore " +"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " +"part entière et une implémentation unifiée de `FedAvg` intégrant la " +"nouvelle fonctionnalité de `FedAvgAndroid`." -#: ../../source/ref-example-projects.rst:38 -#, fuzzy +#: ../../source/ref-changelog.md:1338 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -"`PyTorch : De la centralisation à la fédération (Code) " -"`_" +"**Rendre le temps de garde gRPC configurable par l'utilisateur et " +"diminuer le temps de garde par défaut** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/ref-example-projects.rst:40 -#, fuzzy +#: ../../source/ref-changelog.md:1340 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." 
msgstr "" -"`PyTorch : De la centralisation à la fédération (Tutoriel) " -"`_" - -#: ../../source/ref-example-projects.rst:44 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" +"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " +"compatibilité de Flower avec davantage d'environnements cloud (par " +"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " +"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " +"spécifiques." -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:1342 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -"Cet exemple montre comment Flower peut être utilisé pour construire un " -"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " -"Jetson :" +"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " +"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/ref-example-projects.rst:49 -#, fuzzy +#: ../../source/ref-changelog.md:1344 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " -"`_" +"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " +"différentiellement privé avec Opacus, PyTorch et Flower." 
-#: ../../source/ref-example-projects.rst:51 +#: ../../source/ref-changelog.md:1346 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " -"`_" +"**Nouvel exemple de code pour les Transformers à visage embrassant** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:1348 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." +"Un nouvel exemple de code (`quickstart_huggingface`) démontre " +"l'utilisation des transformateurs Hugging Face avec Flower." -#: ../../source/ref-faq.rst -#, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:1350 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-faq.rst:9 +#: ../../source/ref-changelog.md:1352 msgid "" -"Yes, it can! 
Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " -"démarrage rapide :" +"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" +" MLCube avec Flower." -#: ../../source/ref-faq.rst:11 -#, fuzzy +#: ../../source/ref-changelog.md:1354 msgid "" -"`Flower simulation PyTorch " -"`_" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-faq.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:1356 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"SSL permet d'établir des connexions cryptées et sécurisées entre les " +"clients et les serveurs. 
Cette version met en open-source " +"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " +"communication cryptés accessibles à tous les utilisateurs de Flower." -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:1358 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " -"sur un Raspberry Pi ?" +"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/ref-faq.rst:16 -#, fuzzy +#: ../../source/ref-changelog.md:1360 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -"Trouve le `blog post about federated learning on embedded device ici " -"`_" -" et l'exemple de code GitHub correspondant " -"`_." +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:1362 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " -"sur les appareils Android ?" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-faq.rst:20 -#, fuzzy +#: ../../source/ref-changelog.md:1364 msgid "" -"Yes, it does. 
Please take a look at our `blog post " -"`_ or check out the code examples:" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -"Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub " -"`_." +"`start_simulation` peut maintenant être appelé avec une liste " +"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " +"identifiants seront passés à `client_fn` chaque fois qu'un client doit " +"être initialisé, ce qui peut faciliter le chargement de partitions de " +"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-changelog.md:1368 msgid "" -"`Android Kotlin example `_" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" +"Mettre à jour le calcul de `num_examples` dans les exemples de code " +"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-faq.rst:23 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:1369 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" +"Exposer la version de Flower à travers `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:1370 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" -" ?" 
+"`start_server` dans `app.py` renvoie maintenant un objet `History` " +"contenant les métriques de l'entraînement " +"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-faq.rst:27 +#: ../../source/ref-changelog.md:1371 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " -"environnement blockchain est disponible ici :" +"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-faq.rst:30 -msgid "`FLock: A Decentralised AI Training Platform `_." +#: ../../source/ref-changelog.md:1372 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" +"Augmente le temps de sommeil après le démarrage du serveur à trois " +"secondes dans tous les exemples de code " +"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/ref-faq.rst:30 -msgid "Contribute to on-chain training the model and earn rewards." +#: ../../source/ref-changelog.md:1373 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" +"Ajout d'une nouvelle section FAQ à la documentation " +"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-faq.rst:31 -#, fuzzy -msgid "Local blockchain with federated learning simulation." -msgstr "Mise à l'échelle de l'apprentissage fédéré" - -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:1374 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
msgstr "" -"`Flower meets Nevermined GitHub Repository `_." +"Et bien d'autres changements sous le capot, des mises à jour de la " +"bibliothèque, des modifications de la documentation et des améliorations " +"de l'outillage !" -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-changelog.md:1378 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"`Flower rencontre Nevermined vidéo YouTube " -"`_." +"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " +"release build** ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-faq.rst:34 -#, fuzzy +#: ../../source/ref-changelog.md:1380 msgid "" -"`Flower meets KOSMoS `_." +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -"`Flower rencontre KOSMoS `_." +"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " +"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " +"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " +"supprimés dans une prochaine version." -#: ../../source/ref-faq.rst:35 -msgid "" -"`Flower meets Talan blog post `_ ." -msgstr "" -"`Flower meets Talan blog post `_ ." +#: ../../source/ref-changelog.md:1382 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/ref-faq.rst:36 +#: ../../source/ref-changelog.md:1386 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." 
+"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -"`Flower rencontre Talan Dépôt GitHub " -"`_ ." - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "Télémétrie" +"**Moteur expérimental de client virtuel** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:1388 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -"Le projet open-source Flower recueille des mesures d'utilisation " -"**anonymes** afin de prendre des décisions éclairées pour améliorer " -"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" -" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " -"confrontés." +"L'un des objectifs de Flower est de permettre la recherche à grande " +"échelle. Cette version donne un premier aperçu (expérimental) d'une " +"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " +"client virtuel. 
Les clients virtuels permettent des simulations qui " +"s'étendent à un (très) grand nombre de clients sur une seule machine ou " +"une grappe de calcul. La façon la plus simple de tester la nouvelle " +"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" +" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:1390 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** En restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des mesures d'utilisation anonymes." - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "Principes" +"La fonction est encore expérimentale, il n'y a donc aucune garantie de " +"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " +"prime time et s'accompagne de quelques mises en garde connues. Cependant," +" les personnes curieuses sont encouragées à l'essayer et à faire part de " +"leurs réflexions." 
-#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:1392 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -"Nous suivons des principes stricts concernant la collecte de données " -"anonymes sur l'utilisation :" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:1394 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " -"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:1395 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " -"contiennent aucune information personnelle identifiable (PII). Voir " -"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " -"mesures sont rapportées." +"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. 
" +"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:1397 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " -"sont rapportées ; voir la section \"[Comment inspecter ce qui est " -"rapporté](#how-to-inspect-what-is-being-reported)\"" +"**Nouvel exemple de code PyTorch Lightning** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/ref-telemetry.md:14 -#, fuzzy +#: ../../source/ref-changelog.md:1399 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " -"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" -"to-contact-us)\" pour plus de détails." - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "Comment se désinscrire" +"**Nouvel exemple de code d'autocodage variationnel** " +"([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:1401 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -"Lorsque Flower démarre, il vérifie la présence d'une variable " -"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " -"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " -"supposant que tu démarres un serveur ou un client Flower, fais-le " -"simplement en faisant précéder ta commande de la façon suivante :" +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:1403 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " -"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " -"environnement) pour désactiver la télémétrie de la fleur de façon " -"permanente." - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "Mesures collectées" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "La télémétrie des fleurs recueille les métriques suivantes :" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:1407 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." 
+"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -"**Cela nous aide à décider si nous devons investir des efforts dans la " -"publication d'une version corrective pour une version plus ancienne de " -"Flower ou si nous devons plutôt utiliser la bande passante pour " -"développer de nouvelles fonctionnalités." +"Amélioration de l'exemple de code TensorFlow avancé " +"([#769](https://github.com/adap/flower/pull/769))" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:1408 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -"**Système d'exploitation.** Nous permet de répondre à des questions " -"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " -"Windows ?" +"Avertissement lorsque `min_available_clients` est mal configuré " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:1409 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -"**Version de Python.** Connaître la version de Python nous aide, par " -"exemple, à décider si nous devons investir des efforts dans la prise en " -"charge des anciennes versions de Python ou cesser de les prendre en " -"charge et commencer à tirer parti des nouvelles fonctionnalités de " -"Python." 
+"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:1410 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -"**Comprendre l'environnement matériel dans lequel Flower est utilisé " -"permet de décider si nous devrions, par exemple, faire plus d'efforts " -"pour prendre en charge les environnements à faibles ressources." +"Amélioration du message d'erreur dans `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:1411 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " -"démarre nous permet de comprendre à quel point certaines fonctionnalités " -"sont utilisées et de mieux établir les priorités en fonction de cela." +"Exemple de code de démarrage rapide PyTorch amélioré " +"([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:1415 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." 
+"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " -"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " -"nous permet de comprendre quels types d'appareils non seulement démarrent" -" les charges de travail Flower, mais aussi les terminent avec succès." +"**Désactivé l'évaluation finale distribuée** " +"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:1417 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -"**Source.** La télémétrie de Flower essaie de stocker un ID de source " -"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " -"télémétrie est généré. L'ID de source est important pour identifier si un" -" problème est récurrent ou si un problème est déclenché par plusieurs " -"clusters fonctionnant simultanément (ce qui arrive souvent en " -"simulation). 
Par exemple, si un périphérique exécute plusieurs charges de" -" travail en même temps, et que cela entraîne un problème, alors, afin de " -"reproduire le problème, plusieurs charges de travail doivent être " -"démarrées en même temps." +"Le comportement précédent consistait à effectuer un dernier tour " +"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " +"souvent pas nécessaire (par exemple, lors de l'utilisation de " +"l'évaluation côté serveur). Le comportement précédent peut être activé en" +" passant `force_final_distributed_eval=True` à `start_server`." -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:1419 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " -"souhaites que tous les événements enregistrés sous un identifiant de " -"source spécifique soient supprimés, tu peux envoyer une demande de " -"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " -"Tous les événements liés à cet identifiant de source seront alors " -"définitivement supprimés." +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:1421 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." 
+"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -"Nous ne collecterons aucune information personnelle identifiable. Si tu " -"penses que l'une des métriques collectées pourrait être utilisée à " -"mauvais escient de quelque manière que ce soit, merci de [nous " -"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " -"refléter toute modification des métriques collectées et nous publierons " -"les changements dans le journal des modifications (changelog)." +"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " +"refléter la notation donnée dans l'article original (q-FFL est l'objectif" +" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " +"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " +"des raisons de compatibilité (elle sera supprimée dans une prochaine " +"version)." -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:1423 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"Si tu penses que d'autres mesures nous seraient utiles pour mieux " -"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " -"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " -"la vie privée des utilisateurs, nous pourrons les ajouter." 
- -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "Comment inspecter ce qui est rapporté" +"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:1425 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " -"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " -"informations de télémétrie rapportées en définissant la variable " -"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " -"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " -"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " -"sans envoyer de mesures." +"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " +"basé sur le moteur expérimental du client virtuel, qui deviendra la " +"nouvelle méthode par défaut pour effectuer la plupart des types de " +"simulations à grande échelle dans Flower. L'exemple existant a été " +"conservé à des fins de référence, mais il pourrait être supprimé à " +"l'avenir." 
-#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:1427 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" + +#: ../../source/ref-changelog.md:1431 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " -"anonymes, utilise les deux variables d'environnement :" +"**Nouvelles stratégies intégrées** " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "Comment nous contacter" +#: ../../source/ref-changelog.md:1433 +msgid "(abstract) FedOpt" +msgstr "(résumé) FedOpt" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:1436 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " -"traitons les mesures d'utilisation anonymes, contacte-nous via " -"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " -"courriel (`telemetry@flower.ai`)." +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:1438 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." 
+"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" +"Le serveur Flower est maintenant totalement agnostique, toutes les " +"instances restantes de métriques spécifiques à une tâche (telles que " +"`accuracy`) ont été remplacées par des dictionnaires de métriques " +"personnalisées. Flower 0.15 a introduit la possibilité de passer un " +"dictionnaire contenant des métriques personnalisées du client au serveur." +" À partir de cette version, les métriques personnalisées remplacent les " +"métriques spécifiques à une tâche sur le serveur." -#: ../../source/tutorial-quickstart-android.rst:4 -#, fuzzy -msgid "Quickstart Android" -msgstr "Démarrage rapide des Pandas" - -#: ../../source/tutorial-quickstart-android.rst:9 +#: ../../source/ref-changelog.md:1440 #, fuzzy msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" 
+"Les dictionnaires de métriques personnalisés sont maintenant utilisés " +"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " +"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " +"permettent aux fonctions d'évaluation passées aux stratégies intégrées " +"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " +"stratégies peuvent même renvoyer des dictionnaires de métriques " +"*agrégées* pour que le serveur puisse en garder la trace." -#: ../../source/tutorial-quickstart-android.rst:11 +#: ../../source/ref-changelog.md:1442 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." - -#: ../../source/tutorial-quickstart-fastai.rst:4 -msgid "Quickstart fastai" -msgstr "Démarrage rapide fastai" +"Les implémentations de Stratey doivent migrer leurs méthodes " +"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " +"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " +"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " +"`return loss, {\"accuracy\" : accuracy}`." -#: ../../source/tutorial-quickstart-fastai.rst:6 -#, fuzzy +#: ../../source/ref-changelog.md:1444 msgid "" -"In this federated learning tutorial we will learn how to train a " -"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `." +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." 
msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " +"pris en charge), la compatibilité sera supprimée dans une prochaine " +"version." -#: ../../source/tutorial-quickstart-fastai.rst:10 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 -msgid "Then, clone the code example directly from GitHub:" +#: ../../source/ref-changelog.md:1446 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" +"**Avertissements de migration pour les fonctionnalités obsolètes** " +"([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/tutorial-quickstart-fastai.rst:18 +#: ../../source/ref-changelog.md:1448 msgid "" -"This will create a new directory called `quickstart-fastai` containing " -"the following files:" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" +"Les versions antérieures de Flower ont souvent été migrées vers de " +"nouvelles API, tout en maintenant la compatibilité avec les anciennes " +"API. Cette version introduit des messages d'avertissement détaillés si " +"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " +"d'avertissement fournissent souvent des détails sur la façon de migrer " +"vers des API plus récentes, facilitant ainsi la transition d'une version " +"à l'autre." 
-#: ../../source/tutorial-quickstart-fastai.rst:31 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 -#, fuzzy -msgid "Next, activate your environment, then run:" -msgstr "et active l'environnement virtuel avec :" - -#: ../../source/tutorial-quickstart-fastai.rst:41 +#: ../../source/ref-changelog.md:1450 msgid "" -"This example by default runs the Flower Simulation Engine, creating a " -"federation of 10 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." -" Let's run the project:" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" +"Amélioration des docs et des docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/tutorial-quickstart-fastai.rst:54 -#: ../../source/tutorial-quickstart-huggingface.rst:61 -#: ../../source/tutorial-quickstart-mlx.rst:60 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 -#: ../../source/tutorial-quickstart-pytorch.rst:62 -#: ../../source/tutorial-quickstart-tensorflow.rst:62 -msgid "With default arguments you will see an output like this one:" -msgstr "" +#: ../../source/ref-changelog.md:1452 +msgid "MXNet example and documentation" +msgstr "Exemple et documentation MXNet" -#: ../../source/tutorial-quickstart-fastai.rst:98 -#: ../../source/tutorial-quickstart-huggingface.rst:112 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 -#: ../../source/tutorial-quickstart-pytorch.rst:103 -#: ../../source/tutorial-quickstart-tensorflow.rst:103 +#: ../../source/ref-changelog.md:1454 msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +"FedBN implementation in example 
PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" +"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" +" fédération ([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/tutorial-quickstart-fastai.rst:108 -#, fuzzy +#: ../../source/ref-changelog.md:1458 msgid "" -"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " -"in the Flower GitHub repository." +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"**Serveur agnostique de sérialisation** " +"([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:1460 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" +"Le serveur Flower est désormais totalement agnostique en matière de " +"sérialisation. 
L'utilisation antérieure de la classe `Weights` (qui " +"représente les paramètres sous forme de tableaux NumPy désérialisés) a " +"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " +"Les objets `Parameters` sont totalement agnostiques en matière de " +"sérialisation et représentent les paramètres sous forme de tableaux " +"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " +"d'octets doivent être interprétés (par exemple, pour la " +"sérialisation/désérialisation)." -#: ../../source/tutorial-quickstart-huggingface.rst:4 -msgid "Quickstart 🤗 Transformers" -msgstr "Démarrage rapide 🤗 Transformateurs" - -#: ../../source/tutorial-quickstart-huggingface.rst:6 -#, fuzzy +#: ../../source/ref-changelog.md:1462 msgid "" -"In this federated learning tutorial we will learn how to train a large " -"language model (LLM) on the `IMDB " -"`_ dataset using Flower" -" and the 🤗 Hugging Face Transformers library. It is recommended to create" -" a virtual environment and run everything within a :doc:`virtualenv " -"`." +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"Les stratégies intégrées mettent en œuvre cette approche en gérant en " +"interne la sérialisation et la désérialisation de `Weights`. Les " +"implémentations de stratégies personnalisées ou tierces doivent être " +"mises à jour avec les définitions de méthodes de stratégie légèrement " +"modifiées. 
Les auteurs de stratégies peuvent consulter le PR " +"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " +"stratégies peuvent facilement migrer vers le nouveau format." -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:1464 msgid "" -"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " -"project. It will generate all the files needed to run, by default with " -"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " -"The dataset will be partitioned using |flowerdatasets|_'s " -"|iidpartitioner|_." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" +"Déclassé `flwr.server.Server.evaluate`, utiliser " +"`flwr.server.Server.evaluate_round` à la place " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/tutorial-quickstart-huggingface.rst:17 -#: ../../source/tutorial-quickstart-mlx.rst:17 -#: ../../source/tutorial-quickstart-pytorch.rst:18 -#: ../../source/tutorial-quickstart-tensorflow.rst:18 -#, fuzzy +#: ../../source/ref-changelog.md:1466 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" + +#: ../../source/ref-changelog.md:1470 msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"lançant :" +"**Initialisation des paramètres côté serveur** " +"([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/tutorial-quickstart-huggingface.rst:25 +#: ../../source/ref-changelog.md:1472 msgid "" -"Then, run the command below. 
You will be prompted to select one of the " -"available templates (choose ``HuggingFace``), give a name to your " -"project, and type in your developer name:" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" +"Les paramètres du modèle peuvent maintenant être initialisés côté " +"serveur. L'initialisation des paramètres côté serveur fonctionne via une " +"nouvelle méthode `Strategy` appelée `initialize_parameters`." -#: ../../source/tutorial-quickstart-huggingface.rst:33 -#: ../../source/tutorial-quickstart-mlx.rst:32 -#: ../../source/tutorial-quickstart-pytorch.rst:34 -#: ../../source/tutorial-quickstart-tensorflow.rst:34 +#: ../../source/ref-changelog.md:1474 msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. It should have the following structure:" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" +"Les stratégies intégrées prennent en charge un nouvel argument du " +"constructeur appelé `initial_parameters` pour définir les paramètres " +"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " +"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." 
-#: ../../source/tutorial-quickstart-huggingface.rst:47 -#: ../../source/tutorial-quickstart-mlx.rst:46 -#: ../../source/tutorial-quickstart-pytorch.rst:48 -#: ../../source/tutorial-quickstart-tensorflow.rst:48 +#: ../../source/ref-changelog.md:1493 msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" +"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " +"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " +"l'un des clients connectés ses paramètres et les utilisera comme " +"paramètres globaux initiaux)." -#: ../../source/tutorial-quickstart-huggingface.rst:54 -#: ../../source/tutorial-quickstart-pytorch.rst:55 -#: ../../source/tutorial-quickstart-tensorflow.rst:55 -msgid "To run the project, do:" +#: ../../source/ref-changelog.md:1497 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: ../../source/tutorial-quickstart-huggingface.rst:102 -msgid "You can also run the project with GPU as follows:" -msgstr "" +#: ../../source/ref-changelog.md:1499 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/tutorial-quickstart-huggingface.rst:109 +#: ../../source/ref-changelog.md:1503 msgid "" -"This will use the default arguments where each ``ClientApp`` will use 2 " -"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." 
+"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" +"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " +"retour** ([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/tutorial-quickstart-huggingface.rst:120 -#: ../../source/tutorial-quickstart-mlx.rst:110 -#: ../../source/tutorial-quickstart-pytorch.rst:111 +#: ../../source/ref-changelog.md:1505 msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" +"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " +"associant les clés `str` aux valeurs des types suivants : `bool`, " +"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " +"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " +"du serveur !" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:115 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -#, fuzzy -msgid "The Data" -msgstr "Chargement des données" - -#: ../../source/tutorial-quickstart-huggingface.rst:126 +#: ../../source/ref-changelog.md:1507 msgid "" -"This tutorial uses |flowerdatasets|_ to easily download and partition the" -" `IMDB `_ dataset. 
In " -"this example you'll make use of the |iidpartitioner|_ to generate " -"``num_partitions`` partitions. You can choose |otherpartitioners|_ " -"available in Flower Datasets. To tokenize the text, we will also load the" -" tokenizer from the pre-trained Transformer model that we'll use during " -"training - more on that in the next section. Each ``ClientApp`` will call" -" this function to create dataloaders with the data that correspond to " -"their data partition." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" +"Cette amélioration a également permis de rendre plus cohérents les types " +"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " +"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " +"d'exemples, et un dictionnaire contenant des valeurs arbitraires " +"spécifiques au problème comme la précision." -#: ../../source/tutorial-quickstart-huggingface.rst:171 -#: ../../source/tutorial-quickstart-mlx.rst:155 -#: ../../source/tutorial-quickstart-pytorch.rst:150 -#: ../../source/tutorial-quickstart-tensorflow.rst:139 -#, fuzzy -msgid "The Model" -msgstr "Entraîne le modèle" - -#: ../../source/tutorial-quickstart-huggingface.rst:173 -#, fuzzy +#: ../../source/ref-changelog.md:1509 msgid "" -"We will leverage 🤗 Hugging Face to federate the training of language " -"models over multiple clients using Flower. More specifically, we will " -"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " -"classification over the dataset of IMDB ratings. The end goal is to " -"detect if a movie rating is positive or negative. If you have access to " -"larger GPUs, feel free to use larger models!" 
+"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " -"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " -"précisément, nous mettrons au point un modèle Transformer pré-entraîné " -"(distilBERT) pour la classification de séquences sur un ensemble de " -"données d'évaluations IMDB. L'objectif final est de détecter si " -"l'évaluation d'un film est positive ou négative." +"Au cas où tu te poserais la question : cette fonctionnalité est " +"compatible avec les projets existants, la valeur de retour supplémentaire" +" du dictionnaire est facultative. Le nouveau code doit cependant migrer " +"vers les nouveaux types de retour pour être compatible avec les " +"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " +"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " +"ci-dessous pour plus de détails." -#: ../../source/tutorial-quickstart-huggingface.rst:185 +#: ../../source/ref-changelog.md:1511 msgid "" -"Note that here, ``model_name`` is a string that will be loaded from the " -"``Context`` in the ClientApp and ServerApp." 
+"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" +"*Exemple de code:* note les valeurs de retour du dictionnaire " +"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/tutorial-quickstart-huggingface.rst:188 +#: ../../source/ref-changelog.md:1526 msgid "" -"In addition to loading the pretrained model weights and architecture, we " -"also include two utility functions to perform both training (i.e. " -"``train()``) and evaluation (i.e. ``test()``) using the above model. " -"These functions should look fairly familiar if you have some prior " -"experience with PyTorch. Note these functions do not have anything " -"specific to Flower. That being said, the training function will normally " -"be called, as we'll see later, from a Flower client passing its own data." -" In summary, your clients can use standard training/testing functions to " -"perform local training or evaluation:" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" +"**Généralisé** `config` **argument dans** `Client.fit` **et** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/tutorial-quickstart-huggingface.rst:228 -#: ../../source/tutorial-quickstart-mlx.rst:199 -#: ../../source/tutorial-quickstart-pytorch.rst:224 -#: ../../source/tutorial-quickstart-tensorflow.rst:168 -#, fuzzy -msgid "The ClientApp" -msgstr "client" - -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/ref-changelog.md:1528 msgid "" -"The main changes we have to make to use 🤗 Hugging Face with Flower will " -"be found in the ``get_weights()`` and ``set_weights()`` functions. 
Under " -"the hood, the ``transformers`` library uses PyTorch, which means we can " -"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" -" the :doc:`Quickstart PyTorch ` tutorial. As" -" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" -" and represented as a list of NumPy arrays. The ``set_weights()`` " -"function that's the opposite: given a list of NumPy arrays it applies " -"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" +"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " +"signifie que les valeurs du dictionnaire devaient être des chaînes. La " +"nouvelle version généralise cela pour permettre les valeurs des types " +"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/tutorial-quickstart-huggingface.rst:241 -#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/ref-changelog.md:1530 msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" +"Cela signifie que l'on peut maintenant passer des valeurs presque " +"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " +"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " +"du côté client !" 
-#: ../../source/tutorial-quickstart-huggingface.rst:257 -#: ../../source/tutorial-quickstart-pytorch.rst:250 +#: ../../source/ref-changelog.md:1532 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" +"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" +" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" -#: ../../source/tutorial-quickstart-huggingface.rst:283 +#: ../../source/ref-changelog.md:1549 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" + +#: ../../source/ref-changelog.md:1553 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. You could define " -"additional hyperparameters in ``pyproject.toml`` and access them here." 
+"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" +"Nouvel exemple : PyTorch de centralisé à fédéré " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-quickstart-huggingface.rst:316 -#: ../../source/tutorial-quickstart-mlx.rst:361 -#: ../../source/tutorial-quickstart-pytorch.rst:307 -#: ../../source/tutorial-quickstart-tensorflow.rst:232 -#, fuzzy -msgid "The ServerApp" -msgstr "serveur" +#: ../../source/ref-changelog.md:1554 +msgid "Improved documentation" +msgstr "Amélioration de la documentation" -#: ../../source/tutorial-quickstart-huggingface.rst:318 -msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"|serverappcomponents|_ as opposed to a |client|_ In this example we use " -"the `FedAvg` strategy. To it we pass a randomly initialized model that " -"will server as the global model to federated. Note that the value of " -"``fraction_fit`` is read from the run config. You can find the default " -"value defined in the ``pyproject.toml``." +#: ../../source/ref-changelog.md:1555 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/tutorial-quickstart-huggingface.rst:356 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system for an LLM." 
-msgstr "" +#: ../../source/ref-changelog.md:1556 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-huggingface.rst:361 +#: ../../source/ref-changelog.md:1557 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_hf_link|_ in the Flower GitHub repository. For a " -"comprehensive example of a federated fine-tuning of an LLM with Flower, " -"refer to the |flowertune|_ example in the Flower GitHub repository." +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" +"Mise à jour de la documentation des exemples " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:1558 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" +"Suppression de la documentation obsolète " +"([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/tutorial-quickstart-ios.rst:4 -#, fuzzy -msgid "Quickstart iOS" -msgstr "Démarrage rapide XGBoost" +#: ../../source/ref-changelog.md:1560 +msgid "Bugfix:" +msgstr "Correction de bogues :" -#: ../../source/tutorial-quickstart-ios.rst:9 -#, fuzzy +#: ../../source/ref-changelog.md:1562 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" -"Dans ce tutoriel, nous allons apprendre, comment former un réseau " -"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." +"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " +"déconnexion des clients est maintenant gérée dans " +"`flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." -#: ../../source/tutorial-quickstart-ios.rst:12 -#, fuzzy -msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." -msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +#: ../../source/ref-changelog.md:1564 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/tutorial-quickstart-ios.rst:17 -#, fuzzy +#: ../../source/ref-changelog.md:1566 ../../source/ref-changelog.md:1582 +msgid "Important changes:" +msgstr "Changements importants :" + +#: ../../source/ref-changelog.md:1568 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +"Ajout d'un exemple pour les périphériques embarqués " +"([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/tutorial-quickstart-ios.rst:20 -#, fuzzy +#: ../../source/ref-changelog.md:1569 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. 
" -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." +"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" -#: ../../source/tutorial-quickstart-ios.rst:26 -#, fuzzy +#: ../../source/ref-changelog.md:1570 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " -"exécutant :" +"Déclassement du paquet `flwr_example` et migration des exemples dans le " +"répertoire de premier niveau `examples` " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/tutorial-quickstart-ios.rst:33 -msgid "Or Poetry:" -msgstr "" +#: ../../source/ref-changelog.md:1572 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" -#: ../../source/tutorial-quickstart-ios.rst:40 -#: ../../source/tutorial-quickstart-scikitlearn.rst:43 -#: ../../source/tutorial-quickstart-xgboost.rst:65 -msgid "Flower Client" -msgstr "Client de la fleur" +#: ../../source/ref-changelog.md:1574 +msgid "Incompatible changes:" +msgstr "Changements incompatibles :" -#: ../../source/tutorial-quickstart-ios.rst:42 +#: ../../source/ref-changelog.md:1576 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" +"Renommé les méthodes de stratégie " +"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" +" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " +"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" +" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " +"quatre méthodes de Stratégie. 
Pour migrer, renommez les méthodes de " +"`Strategy` suivantes en conséquence :" -#: ../../source/tutorial-quickstart-ios.rst:80 -msgid "" -"Let's create a new application project in Xcode and add ``flwr`` as a " -"dependency in your project. For our application, we will store the logic " -"of our app in ``FLiOSModel.swift`` and the UI elements in " -"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" -" quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." -msgstr "" +#: ../../source/ref-changelog.md:1577 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" -#: ../../source/tutorial-quickstart-ios.rst:86 -msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" -msgstr "" +#: ../../source/ref-changelog.md:1578 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" -#: ../../source/tutorial-quickstart-ios.rst:94 +#: ../../source/ref-changelog.md:1579 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:1580 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" + +#: ../../source/ref-changelog.md:1584 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" -" ``MLBatchProvider`` object. The preprocessing is done inside " -"``DataLoader.swift``." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." 
msgstr "" +"Déclassé `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " +"`FedAvg` à la place." -#: ../../source/tutorial-quickstart-ios.rst:112 +#: ../../source/ref-changelog.md:1585 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -"``MLModelInspect``." +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" +"Exemples simplifiés et lignes de base " +"([#484](https://github.com/adap/flower/pull/484))." -#: ../../source/tutorial-quickstart-ios.rst:118 +#: ../../source/ref-changelog.md:1586 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" +"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " +"de stratégie ([#483](https://github.com/adap/flower/pull/483))." -#: ../../source/tutorial-quickstart-ios.rst:133 +#: ../../source/ref-changelog.md:1587 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function ``startFlwrGRPC``." +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" +"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/tutorial-quickstart-ios.rst:141 +#: ../../source/ref-changelog.md:1588 msgid "" -"That's it for the client. 
We only have to implement ``Client`` or call " -"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" -" ``hostname`` and ``port`` tells the client which server to connect to. " -"This can be done by entering the hostname and port in the application " -"before clicking the start button to start the federated learning process." +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" +"Amélioration des docstrings `Stratégie` " +"([#470](https://github.com/adap/flower/pull/470))." -#: ../../source/tutorial-quickstart-ios.rst:148 -#: ../../source/tutorial-quickstart-scikitlearn.rst:179 -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "Flower Server" -msgstr "Serveur de Flower" - -#: ../../source/tutorial-quickstart-ios.rst:150 +#: ../../source/ref-example-projects.rst:2 #, fuzzy +msgid "Example projects" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-example-projects.rst:4 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -"``server.py``, import Flower and start the server:" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -"Pour les charges de travail simples, nous pouvons démarrer un serveur " -"Flower et laisser toutes les possibilités de configuration à leurs " -"valeurs par défaut. 
Dans un fichier nommé :code:`server.py`, importe " -"Flower et démarre le serveur :" +"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " +"montrent comment Flower peut être utilisé pour fédérer différents types " +"de pipelines d'apprentissage automatique existants, qui s'appuient " +"généralement sur des frameworks d'apprentissage automatique populaires " +"tels que `PyTorch `_ ou `TensorFlow " +"`_." -#: ../../source/tutorial-quickstart-ios.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:254 -msgid "Train the model, federated!" -msgstr "Entraîne le modèle, fédéré !" +#: ../../source/ref-example-projects.rst:9 +#, fuzzy +msgid "The following examples are available as standalone projects." +msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." + +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "Démarrage rapide de TensorFlow" -#: ../../source/tutorial-quickstart-ios.rst:163 -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-example-projects.rst:14 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout " -"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " -"généralement un serveur et plusieurs clients. Nous devons donc commencer " -"par démarrer le serveur :" +"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " +"d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/tutorial-quickstart-ios.rst:171 +#: ../../source/ref-example-projects.rst:17 +#, fuzzy msgid "" -"Once the server is running we can start the clients in different " -"terminals. 
Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/tutorial-quickstart-ios.rst:177 +#: ../../source/ref-example-projects.rst:19 #, fuzzy -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in ``examples/ios``." +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"`Quickstart TensorFlow (Tutorial) `_" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-example-projects.rst:20 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" +"`Quickstart TensorFlow (Blog Post) `_" -#: ../../source/tutorial-quickstart-jax.rst:4 -msgid "Quickstart JAX" -msgstr "Démarrage rapide de JAX" +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "Démarrage rapide de PyTorch" -#: ../../source/tutorial-quickstart-jax.rst:9 -#, fuzzy +#: ../../source/ref-example-projects.rst:26 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. 
First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX" -" pour entraîner un modèle de régression linéaire sur un ensemble de " -"données scikit-learn. Nous structurerons l'exemple de la même manière que" -" notre présentation `PyTorch - De la centralisation à la fédération " -"`_. Tout d'abord, nous construisons une approche" -" d'entraînement centralisée basée sur le tutoriel `Régression linéaire " -"avec JAX " -"`_." -" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." +"L'exemple de démarrage rapide PyTorch montre la classification d'images " +"CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/tutorial-quickstart-jax.rst:20 +#: ../../source/ref-example-projects.rst:29 #, fuzzy msgid "" -"Before we start building our JAX example, we need install the packages " -"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -"Avant de commencer à construire notre exemple JAX, nous devons installer " -"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " -":code:`flwr` :" - -#: ../../source/tutorial-quickstart-jax.rst:28 -msgid "Linear Regression with JAX" -msgstr "Régression linéaire avec JAX" +"`Quickstart PyTorch (Code) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:30 +#: ../../source/ref-example-projects.rst:31 #, fuzzy -msgid "" -"We begin with a brief description of the centralized training code based " -"on a ``Linear Regression`` model. 
If you want a more in-depth explanation" -" of what's going on then have a look at the official `JAX documentation " -"`_." +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -"Nous commençons par une brève description du code d'entraînement " -"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une" -" explication plus approfondie de ce qui se passe, jette un coup d'œil à " -"la documentation officielle `JAX `_." +"`Quickstart PyTorch (Tutorial) `_" -#: ../../source/tutorial-quickstart-jax.rst:34 -#, fuzzy +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch : De la centralisation à la fédération" + +#: ../../source/ref-example-projects.rst:36 msgid "" -"Let's create a new file called ``jax_training.py`` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " -"imported. In addition, we need to import ``sklearn`` since we use " -"``make_regression`` for the dataset and ``train_test_split`` to split the" -" dataset into a training and test set. You can see that we do not yet " -"import the ``flwr`` package for federated learning. This will be done " -"later." +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " -"composants nécessaires pour un apprentissage traditionnel (centralisé) de" -" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et " -":code:`jaxlib` doivent être importés. En outre, nous devons importer " -":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le " -"jeu de données et :code:`train_test_split` pour diviser le jeu de données" -" en un jeu d'entraînement et un jeu de test. Tu peux voir que nous " -"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " -"fédéré, ce qui sera fait plus tard." 
+"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" +" l'aide de Flower :" -#: ../../source/tutorial-quickstart-jax.rst:51 +#: ../../source/ref-example-projects.rst:38 #, fuzzy -msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test mentionnés." +"`PyTorch : De la centralisation à la fédération (Code) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:63 +#: ../../source/ref-example-projects.rst:40 #, fuzzy msgid "" -"The model architecture (a very simple ``Linear Regression`` model) is " -"defined in ``load_model()``." +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -"L'architecture du modèle (un modèle :code:`Régression linéaire` très " -"simple) est définie dans :code:`load_model()`." +"`PyTorch : De la centralisation à la fédération (Tutoriel) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:73 -#, fuzzy +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" + +#: ../../source/ref-example-projects.rst:46 msgid "" -"We now need to define the training (function ``train()``), which loops " -"over the training set and measures the loss (function ``loss_fn()``) for " -"each batch of training examples. The loss function is separate since JAX " -"takes derivatives with a ``grad()`` function (defined in the ``main()`` " -"function and called in ``train()``)." +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," -" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " -":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. 
La fonction" -" de perte est séparée puisque JAX prend des dérivés avec une fonction " -":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " -":code:`train()`)." +"Cet exemple montre comment Flower peut être utilisé pour construire un " +"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " +"Jetson :" -#: ../../source/tutorial-quickstart-jax.rst:95 +#: ../../source/ref-example-projects.rst:49 #, fuzzy msgid "" -"The evaluation of the model is defined in the function ``evaluation()``. " -"The function takes all test examples and measures the loss of the linear " -"regression model." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." -" La fonction prend tous les exemples de test et mesure la perte du modèle" -" de régression linéaire." +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:107 -#, fuzzy +#: ../../source/ref-example-projects.rst:51 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the ``jax.grad()`` function is defined in " -"``main()`` and passed to ``train()``." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " -"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " -"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " -"à :code:`train()`." 
+"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:126 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "" -"Tu peux maintenant exécuter ta charge de travail (centralisée) de " -"régression linéaire JAX :" +#: ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "FAQ" -#: ../../source/tutorial-quickstart-jax.rst:132 +#: ../../source/ref-faq.rst:4 msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé JAX. Passons à l'étape suivante et utilisons ce que nous " -"avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." - -#: ../../source/tutorial-quickstart-jax.rst:137 -msgid "JAX meets Flower" -msgstr "JAX rencontre Flower" +"Cette page rassemble les réponses aux questions les plus fréquemment " +"posées sur l'apprentissage fédéré avec Flower." -#: ../../source/tutorial-quickstart-jax.rst:139 +#: ../../source/ref-faq.rst #, fuzzy -msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -"``jax_training.py`` for the *clients* that are connected to the *server*." -" The *server* sends model parameters to the clients. The *clients* run " -"the training and update the parameters. The updated parameters are sent " -"back to the *server*, which averages all received parameter updates. This" -" describes one round of the federated learning process, and we repeat " -"this for multiple rounds." 
+msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -"Le concept de fédération d'une charge de travail existante est toujours " -"le même et facile à comprendre. Nous devons démarrer un *serveur*, puis " -"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" -" connectés au *serveur*.Le *serveur* envoie les paramètres du modèle aux " -"clients.Les *clients* exécutent la formation et mettent à jour les " -"paramètres.Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues.Ceci décrit " -"un tour du processus d'apprentissage fédéré, et nous répétons cette " -"opération pour plusieurs tours." +":fa:`eye,mr-1` Flower peut-il fonctionner sur les notebooks Jupyter / " +"Google Colab ?" -#: ../../source/tutorial-quickstart-jax.rst:167 -#, fuzzy +#: ../../source/ref-faq.rst:9 msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined JAX training in ``jax_training.py``. Our " -"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " -"update the parameters on our JAX model:" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -"Enfin, nous allons définir la logique de notre *client* dans " -":code:`client.py` et nous appuyer sur la formation JAX définie " -"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " -":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" -" les paramètres de notre modèle JAX :" +"Oui, c'est possible ! Flower est même livré avec quelques optimisations " +"pour qu'il fonctionne encore mieux sur Colab. 
Voici un exemple de " +"démarrage rapide :" -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/ref-faq.rst:11 #, fuzzy msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" -" than ``Client`` if you use a framework with good NumPy interoperability " -"(like JAX) because it avoids some of the boilerplate that would otherwise" -" be necessary. ``FlowerClient`` needs to implement four methods, two " -"methods for getting/setting model parameters, one method for training the" -" model, and one method for testing the model:" +"`Flower simulation PyTorch " +"`_" msgstr "" -"L'implémentation d'un *client* Flower signifie essentiellement " -"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " -":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " -":code:`flwr.client.NumPyClient` et nous l'appellerons " -":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " -"implémenter que :code:`Client` si vous utilisez un framework avec une " -"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " -"boilerplate qui serait autrement nécessaire. 
:code:`FlowerClient` doit " -"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " -"paramètres du modèle, une méthode pour former le modèle, et une méthode " -"pour tester le modèle :" - -#: ../../source/tutorial-quickstart-jax.rst:194 -#, fuzzy -msgid "``set_parameters (optional)``" -msgstr ":code:`set_parameters (optional)`" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:193 +#: ../../source/ref-faq.rst:12 #, fuzzy -msgid "transform parameters to NumPy ``ndarray``'s" -msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" - -#: ../../source/tutorial-quickstart-jax.rst:203 -msgid "get the updated local model parameters and return them to the server" +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -"récupère les paramètres du modèle local mis à jour et les renvoie au " -"serveur" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:208 -msgid "return the local loss to the server" -msgstr "renvoie la perte locale au serveur" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr "" +":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " +"sur un Raspberry Pi ?" -#: ../../source/tutorial-quickstart-jax.rst:210 -#, fuzzy -msgid "" -"The challenging part is to transform the JAX model parameters from " -"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " -"`NumPyClient`." -msgstr "" -"La partie la plus difficile consiste à transformer les paramètres du " -"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " -"rendre compatibles avec `NumPyClient`." - -#: ../../source/tutorial-quickstart-jax.rst:213 +#: ../../source/ref-faq.rst:16 #, fuzzy msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``evaluate()`` previously defined in " -"``jax_training.py``. 
So what we really do here is we tell Flower through " -"our ``NumPyClient`` subclass which of our already defined functions to " -"call for training and evaluation. We included type annotations to give " -"you a better understanding of the data types that get passed around." +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " -"précédemment dans :code:`jax_training.py`. Ce que nous faisons vraiment " -"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" -" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." - -#: ../../source/tutorial-quickstart-jax.rst:286 -msgid "Having defined the federation process, we can run it." -msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." +"Trouve le `blog post about federated learning on embedded device ici " +"`_" +" et l'exemple de code GitHub correspondant " +"`_." -#: ../../source/tutorial-quickstart-jax.rst:315 -msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " -"l'apprentissage fédéré sur deux clients. Félicitations !" +":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " +"sur les appareils Android ?" 
-#: ../../source/tutorial-quickstart-jax.rst:321 +#: ../../source/ref-faq.rst:20 #, fuzzy msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -"Le code source de cet exemple a été amélioré au fil du temps et peut être" -" trouvé ici : `Quickstart JAX " -"`_. " -"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " -"chargent le même jeu de données." +"Oui. Jetez un coup d'œil à notre `blog post " +"`_ ou consultez l'`exemple de code Android sur GitHub " +"`_." -#: ../../source/tutorial-quickstart-jax.rst:325 +#: ../../source/ref-faq.rst:22 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +"`Android Kotlin example `_" msgstr "" -"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" -" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " -"ne pas ajouter d'autres clients ?" -#: ../../source/tutorial-quickstart-mlx.rst:4 -#, fuzzy -msgid "Quickstart MLX" -msgstr "Démarrage rapide de JAX" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:6 -#, fuzzy -msgid "" -"In this federated learning tutorial we will learn how to train simple MLP" -" on MNIST using Flower and MLX. It is recommended to create a virtual " -"environment and run everything within a :doc:`virtualenv `." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." 
+":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" +" ?" -#: ../../source/tutorial-quickstart-mlx.rst:10 +#: ../../source/ref-faq.rst:27 msgid "" -"Let's use `flwr new` to create a complete Flower+MLX project. It will " -"generate all the files needed to run, by default with the Simulation " -"Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" +"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " +"environnement blockchain est disponible ici :" -#: ../../source/tutorial-quickstart-mlx.rst:25 -msgid "" -"Then, run the command below. You will be prompted to select of the " -"available templates (choose ``MLX``), give a name to your project, and " -"type in your developer name:" +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:53 -msgid "To run the project do:" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:102 +#: ../../source/ref-faq.rst:31 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#: ../../source/ref-faq.rst:32 msgid "" -"You can also override the parameters defined in " -"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" +"`Flower meets Nevermined GitHub Repository `_." -#: ../../source/tutorial-quickstart-mlx.rst:116 +#: ../../source/ref-faq.rst:33 msgid "" -"We will use `Flower Datasets `_ to " -"easily download and partition the `MNIST` dataset. 
In this example you'll" -" make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets:" +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" +"`Flower rencontre Nevermined vidéo YouTube " +"`_." -#: ../../source/tutorial-quickstart-mlx.rst:157 +#: ../../source/ref-faq.rst:34 +#, fuzzy msgid "" -"We define the model as in the `centralized MLX example " -"`_, it's a " -"simple MLP:" +"`Flower meets KOSMoS `_." msgstr "" +"`Flower rencontre KOSMoS `_." -#: ../../source/tutorial-quickstart-mlx.rst:180 +#: ../../source/ref-faq.rst:35 msgid "" -"We also define some utility functions to test our model and to iterate " -"over batches." +"`Flower meets Talan blog post `_ ." msgstr "" +"`Flower meets Talan blog post `_ ." -#: ../../source/tutorial-quickstart-mlx.rst:201 +#: ../../source/ref-faq.rst:36 msgid "" -"The main changes we have to make to use `MLX` with `Flower` will be found" -" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " -"doesn't provide an easy way to convert the model parameters into a list " -"of ``np.array`` objects (the format we need for the serialization of the " -"messages to work)." +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" +"`Flower rencontre Talan Dépôt GitHub " +"`_ ." -#: ../../source/tutorial-quickstart-mlx.rst:206 -msgid "The way MLX stores its parameters is as follows:" -msgstr "" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "Télémétrie" -#: ../../source/tutorial-quickstart-mlx.rst:219 +#: ../../source/ref-telemetry.md:3 msgid "" -"Therefore, to get our list of ``np.array`` objects, we need to extract " -"each array and convert them into a NumPy array:" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." 
msgstr "" +"Le projet open-source Flower recueille des mesures d'utilisation " +"**anonymes** afin de prendre des décisions éclairées pour améliorer " +"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" +" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " +"confrontés." -#: ../../source/tutorial-quickstart-mlx.rst:228 +#: ../../source/ref-telemetry.md:5 msgid "" -"For the ``set_params()`` function, we perform the reverse operation. We " -"receive a list of NumPy arrays and want to convert them into MLX " -"parameters. Therefore, we iterate through pairs of parameters and assign " -"them to the `weight` and `bias` keys of each layer dict:" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** En restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des mesures d'utilisation anonymes." -#: ../../source/tutorial-quickstart-mlx.rst:243 -msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset:" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "Principes" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" +"Nous suivons des principes stricts concernant la collecte de données " +"anonymes sur l'utilisation :" -#: ../../source/tutorial-quickstart-mlx.rst:259 +#: ../../source/ref-telemetry.md:11 msgid "" -"Here, after updating the parameters, we perform the training as in the " -"centralized case, and return the new parameters." 
+"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" +"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " +"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." -#: ../../source/tutorial-quickstart-mlx.rst:262 -msgid "And for the ``evaluate()`` method of the client:" +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" +"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " +"contiennent aucune information personnelle identifiable (PII). Voir " +"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " +"mesures sont rapportées." -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/ref-telemetry.md:13 msgid "" -"We also begin by updating the parameters with the ones sent by the " -"server, and then we compute the loss and accuracy using the functions " -"defined above. In the constructor of the ``FlowerClient`` we instantiate " -"the `MLP` model as well as other components such as the optimizer." 
+"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" +"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " +"sont rapportées ; voir la section \"[Comment inspecter ce qui est " +"rapporté](#how-to-inspect-what-is-being-reported)\"" -#: ../../source/tutorial-quickstart-mlx.rst:277 +#: ../../source/ref-telemetry.md:14 #, fuzzy -msgid "Putting everything together we have:" -msgstr "Tout assembler" - -#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that " -"``context`` enables you to get access to hyperparemeters defined in " -"``pyproject.toml`` to configure the run. In this tutorial we access, " -"among other hyperparameters, the ``local-epochs`` setting to control the " -"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " -"method." +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." msgstr "" +"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " +"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" +"to-contact-us)\" pour plus de détails." -#: ../../source/tutorial-quickstart-mlx.rst:363 -msgid "" -"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " -"an identical signature to that of ``client_fn()``, but the return type is" -" `ServerAppComponents `_ as " -"opposed to `Client `_. In this example we use the " -"``FedAvg`` strategy." 
-msgstr "" +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "Comment se désinscrire" -#: ../../source/tutorial-quickstart-mlx.rst:386 -#: ../../source/tutorial-quickstart-pytorch.rst:344 -#: ../../source/tutorial-quickstart-tensorflow.rst:266 +#: ../../source/ref-telemetry.md:18 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:390 -#, fuzzy +"Lorsque Flower démarre, il vérifie la présence d'une variable " +"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " +"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " +"supposant que tu démarres un serveur ou un client Flower, fais-le " +"simplement en faisant précéder ta commande de la façon suivante :" + +#: ../../source/ref-telemetry.md:24 msgid "" -"Check the `source code `_ of the extended version of this tutorial in ``examples" -"/quickstart-mlx`` in the Flower GitHub repository." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " +"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " +"environnement) pour désactiver la télémétrie de la fleur de façon " +"permanente." 
-#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "Mesures collectées" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "La télémétrie des fleurs recueille les métriques suivantes :" + +#: ../../source/ref-telemetry.md:30 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" +"**Cela nous aide à décider si nous devons investir des efforts dans la " +"publication d'une version corrective pour une version plus ancienne de " +"Flower ou si nous devons plutôt utiliser la bande passante pour " +"développer de nouvelles fonctionnalités." -#: ../../source/tutorial-quickstart-pandas.rst:4 -msgid "Quickstart Pandas" -msgstr "Démarrage rapide des Pandas" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" +"**Système d'exploitation.** Nous permet de répondre à des questions " +"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " +"Windows ?" -#: ../../source/tutorial-quickstart-pandas.rst:9 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." 
+msgstr "" +"**Version de Python.** Connaître la version de Python nous aide, par " +"exemple, à décider si nous devons investir des efforts dans la prise en " +"charge des anciennes versions de Python ou cesser de les prendre en " +"charge et commencer à tirer parti des nouvelles fonctionnalités de " +"Python." -#: ../../source/tutorial-quickstart-pandas.rst:11 -#, fuzzy +#: ../../source/ref-telemetry.md:36 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"**Comprendre l'environnement matériel dans lequel Flower est utilisé " +"permet de décider si nous devrions, par exemple, faire plus d'efforts " +"pour prendre en charge les environnements à faibles ressources." -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-telemetry.md:38 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" +"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " +"démarre nous permet de comprendre à quel point certaines fonctionnalités " +"sont utilisées et de mieux établir les priorités en fonction de cela." -#: ../../source/tutorial-quickstart-pytorch.rst:6 -#, fuzzy +#: ../../source/ref-telemetry.md:40 msgid "" -"In this federated learning tutorial we will learn how to train a " -"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." 
+"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " +"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " +"nous permet de comprendre quels types d'appareils non seulement démarrent" +" les charges de travail Flower, mais aussi les terminent avec succès." -#: ../../source/tutorial-quickstart-pytorch.rst:11 +#: ../../source/ref-telemetry.md:42 msgid "" -"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" -" generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" +"**Source.** La télémétrie de Flower essaie de stocker un ID de source " +"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " +"télémétrie est généré. L'ID de source est important pour identifier si un" +" problème est récurrent ou si un problème est déclenché par plusieurs " +"clusters fonctionnant simultanément (ce qui arrive souvent en " +"simulation). 
Par exemple, si un périphérique exécute plusieurs charges de" +" travail en même temps, et que cela entraîne un problème, alors, afin de " +"reproduire le problème, plusieurs charges de travail doivent être " +"démarrées en même temps." -#: ../../source/tutorial-quickstart-pytorch.rst:26 +#: ../../source/ref-telemetry.md:44 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``PyTorch``), give a name to your project, " -"and type in your developer name:" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" +"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " +"souhaites que tous les événements enregistrés sous un identifiant de " +"source spécifique soient supprimés, tu peux envoyer une demande de " +"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " +"Tous les événements liés à cet identifiant de source seront alors " +"définitivement supprimés." -#: ../../source/tutorial-quickstart-pytorch.rst:117 +#: ../../source/ref-telemetry.md:46 msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create dataloaders with the data" -" that correspond to their data partition." +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). 
We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" +"Nous ne collecterons aucune information personnelle identifiable. Si tu " +"penses que l'une des métriques collectées pourrait être utilisée à " +"mauvais escient de quelque manière que ce soit, merci de [nous " +"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " +"refléter toute modification des métriques collectées et nous publierons " +"les changements dans le journal des modifications (changelog)." -#: ../../source/tutorial-quickstart-pytorch.rst:152 +#: ../../source/ref-telemetry.md:48 msgid "" -"We defined a simple Convolutional Neural Network (CNN), but feel free to " -"replace it with a more sophisticated model if you'd like:" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" +"Si tu penses que d'autres mesures nous seraient utiles pour mieux " +"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " +"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " +"la vie privée des utilisateurs, nous pourrons les ajouter." -#: ../../source/tutorial-quickstart-pytorch.rst:177 +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "Comment inspecter ce qui est rapporté" + +#: ../../source/ref-telemetry.md:52 msgid "" -"In addition to defining the model architecture, we also include two " -"utility functions to perform both training (i.e. ``train()``) and " -"evaluation (i.e. ``test()``) using the above model. These functions " -"should look fairly familiar if you have some prior experience with " -"PyTorch. Note these functions do not have anything specific to Flower. 
" -"That being said, the training function will normally be called, as we'll " -"see later, from a Flower client passing its own data. In summary, your " -"clients can use standard training/testing functions to perform local " -"training or evaluation:" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" +"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " +"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " +"informations de télémétrie rapportées en définissant la variable " +"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " +"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " +"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " +"sans envoyer de mesures." -#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/ref-telemetry.md:58 msgid "" -"The main changes we have to make to use `PyTorch` with `Flower` will be " -"found in the ``get_weights()`` and ``set_weights()`` functions. In " -"``get_weights()`` PyTorch model parameters are extracted and represented " -"as a list of NumPy arrays. The ``set_weights()`` function that's the " -"oposite: given a list of NumPy arrays it applies them to an existing " -"PyTorch model. Doing this in fairly easy in PyTorch." 
+"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" +"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " +"anonymes, utilise les deux variables d'environnement :" -#: ../../source/tutorial-quickstart-pytorch.rst:282 +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "Comment nous contacter" + +#: ../../source/ref-telemetry.md:66 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"`local-epochs` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. You could define " -"additioinal hyperparameters in ``pyproject.toml`` and access them here." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" +"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " +"traitons les mesures d'utilisation anonymes, contacte-nous via " +"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " +"courriel (`telemetry@flower.ai`)." -#: ../../source/tutorial-quickstart-pytorch.rst:309 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will server as " -"the global model to federated. 
Note that the value of ``fraction_fit`` is" -" read from the run config. You can find the default value defined in the " -"``pyproject.toml``." +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:348 +#: ../../source/tutorial-quickstart-android.rst:4 #, fuzzy +msgid "Quickstart Android" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" -"Check the `source code `_ of the extended version of this tutorial in " -"``examples/quickstart-pytorch`` in the Flower GitHub repository." +"The experimental Flower Android SDK is not compatible with the latest " +"version of Flower. Android support is currently being reworked and will " +"be released in 2025." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-pytorch.rst:354 -#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#: ../../source/tutorial-quickstart-android.rst:14 +msgid "" +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new Android SDK is released." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:17 #, fuzzy -msgid "Video tutorial" -msgstr "Tutoriel" +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" +"Construisons un système d'apprentissage fédéré en utilisant fastai et " +"Flower !" -#: ../../source/tutorial-quickstart-pytorch.rst:358 +#: ../../source/tutorial-quickstart-android.rst:19 +#, fuzzy msgid "" -"The video shown below shows how to setup a PyTorch + Flower project using" -" our previously recommended APIs. 
A new video tutorial will be released " -"that shows the new APIs (as the content above does)" +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 -msgid "Quickstart PyTorch Lightning" -msgstr "Démarrage rapide de PyTorch Lightning" +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "Démarrage rapide fastai" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#: ../../source/tutorial-quickstart-fastai.rst:6 #, fuzzy msgid "" -"In this federated learning tutorial we will learn how to train an " -"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_." 
-#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" -"This will create a new directory called `quickstart-pytorch-lightning` " -"containing the following files:" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "et active l'environnement virtuel avec :" + +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" -"By default, Flower Simulation Engine will be started and it will create a" -" federation of 4 nodes using `FedAvg `_ " "as the aggregation strategy. The dataset will be partitioned using Flower" " Dataset's `IidPartitioner `_." 
-" To run the project, do:" +" Let's run the project:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-jax.rst:60 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-scikitlearn.rst:59 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +#: ../../source/tutorial-quickstart-xgboost.rst:492 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-jax.rst:102 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-scikitlearn.rst:101 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +#: ../../source/tutorial-quickstart-xgboost.rst:537 msgid "" -"Each simulated `ClientApp` (two per round) will also log a summary of " -"their local training process. Expect this output to be similar to:" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#: ../../source/tutorial-quickstart-fastai.rst:108 #, fuzzy msgid "" "Check the `source code `_ of this tutorial in ``examples" -"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +"/quickstart-fastai>`_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. 
Le code source complet " @@ -25782,819 +25264,763 @@ msgstr "" "mxnet/client.py>`_ de cet exemple se trouve dans :code:`examples" "/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:4 -msgid "Quickstart scikit-learn" -msgstr "Démarrage rapide de scikit-learn" +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "Démarrage rapide 🤗 Transformateurs" -#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#: ../../source/tutorial-quickstart-huggingface.rst:6 #, fuzzy msgid "" -"In this tutorial, we will learn how to train a ``Logistic Regression`` " -"model on MNIST using Flower and scikit-learn." +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " -"régression logistique` sur MNIST en utilisant Flower et scikit-learn." +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. 
It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -"Il est recommandé de créer un environnement virtuel et de tout exécuter " -"dans ce `virtualenv `_." +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#, fuzzy msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"lançant :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -"*Les clients* sont chargés de générer des mises à jour individuelles des " -"paramètres du modèle en fonction de leurs ensembles de données locales. " -"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " -"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " -"version améliorée du modèle à chaque *client*. Un cycle complet de mises " -"à jour des paramètres s'appelle un *round*." -#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-jax.rst:32 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:31 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " -"lançant :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:30 -#, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:36 -msgid "Or simply install all dependencies using Poetry:" -msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-jax.rst:46 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 #: ../../source/tutorial-quickstart-scikitlearn.rst:45 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within ``utils.py``. The " -"``utils.py`` contains different functions defining all the machine " -"learning basics:" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -"Maintenant que toutes nos dépendances sont installées, exécutons une " -"formation distribuée simple avec deux clients et un serveur. Cependant, " -"avant de configurer le client et le serveur, nous allons définir toutes " -"les fonctionnalités dont nous avons besoin pour notre configuration " -"d'apprentissage fédéré dans :code:`utils.py`. 
Le :code:`utils.py` " -"contient différentes fonctions définissant toutes les bases de " -"l'apprentissage automatique :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -#, fuzzy -msgid "``get_model_parameters()``" -msgstr ":code:`get_model_parameters()`" +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-pytorch.rst:55 #: ../../source/tutorial-quickstart-scikitlearn.rst:52 -#, fuzzy -msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +#: ../../source/tutorial-quickstart-xgboost.rst:485 +msgid "To run the project, do:" msgstr "" -"Renvoie les paramètres d'un modèle de régression logistique " -":code:`sklearn`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -#, fuzzy -msgid "``set_model_params()``" -msgstr ":code:`set_model_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:54 -#, fuzzy -msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" -msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -#, fuzzy -msgid "``set_initial_params()``" -msgstr ":code:`set_initial_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:58 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" -"Please check out ``utils.py`` `here " -"`_ for more details. The pre-defined functions are used in" -" the ``client.py`` and imported. 
The ``client.py`` also requires to " -"import several packages such as Flower and scikit-learn:" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -"Tu peux consulter :code:`utils.py` `ici " -"`_ pour plus de détails. Les fonctions prédéfinies sont " -"utilisées dans :code:`client.py` et importées. :code:`client.py` " -"nécessite également d'importer plusieurs paquets tels que Flower et " -"scikit-learn :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-jax.rst:110 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -"``FederatedDataset.load_partition()`` method loads the partitioned " -"training set for each partition ID defined in the ``--partition-id`` " -"argument." +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-jax.rst:114 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#: ../../source/tutorial-quickstart-xgboost.rst:89 #, fuzzy +msgid "The Data" +msgstr "Chargement des données" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" -"Next, the logistic regression model is defined and initialized with " -"``utils.set_initial_params()``." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -"Ensuite, le modèle de régression logistique est défini et initialisé avec" -" :code:`utils.set_initial_params()`." -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-jax.rst:128 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-scikitlearn.rst:138 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 #, fuzzy -msgid "" -"The Flower server interacts with clients through an interface called " -"``Client``. When the server selects a particular client for training, it " -"sends training instructions over the network. 
The client receives those " -"instructions and calls one of the ``Client`` methods to run your code " -"(i.e., to fit the logistic regression we defined earlier)." -msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" -" la régression logistique que nous avons définie plus tôt)." +msgid "The Model" +msgstr "Entraîne le modèle" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-huggingface.rst:173 #, fuzzy msgid "" -"Flower provides a convenience class called ``NumPyClient`` which makes it" -" easier to implement the ``Client`` interface when your workload uses " -"scikit-learn. Implementing ``NumPyClient`` usually means defining the " -"following methods (``set_parameters`` is optional though):" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise scikit-learn. 
Mettre en œuvre :code:`NumPyClient` " -"signifie généralement définir les méthodes suivantes " -"(:code:`set_parameters` est cependant facultatif) :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 -#, fuzzy -msgid "``set_parameters`` (optional)" -msgstr ":code:`set_parameters` (optionnel)" +"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " +"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " +"précisément, nous mettrons au point un modèle Transformer pré-entraîné " +"(distilBERT) pour la classification de séquences sur un ensemble de " +"données d'évaluations IMDB. L'objectif final est de détecter si " +"l'évaluation d'un film est positive ou négative." -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" -"update the local model weights with the parameters received from the " -"server" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." 
msgstr "" -"mettre à jour les poids du modèle local avec les paramètres reçus du " -"serveur" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:133 -#, fuzzy -msgid "is directly imported with ``utils.set_model_params()``" -msgstr "est directement importé avec :code:`utils.set_model_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:135 -msgid "set the local model weights" -msgstr "fixe les poids du modèle local" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "train the local model" -msgstr "entraîne le modèle local" +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." 
+" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#: ../../source/tutorial-quickstart-xgboost.rst:149 #, fuzzy -msgid "return the updated local model weights" -msgstr "recevoir les poids du modèle local mis à jour" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -msgid "test the local model" -msgstr "teste le modèle local" +msgid "The ClientApp" +msgstr "client" -#: ../../source/tutorial-quickstart-scikitlearn.rst:141 -msgid "The methods can be implemented in the following way:" -msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." 
+msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:163 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" -"We can now create an instance of our class ``MnistClient`` and add one " -"line to actually run this client:" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:170 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" -"That's it for the client. We only have to implement ``Client`` or " -"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" -" client of type ``NumPyClient`` you'll need to first call its " -"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " -"which server to connect to. In our case we can run the server and the " -"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " -"run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" pass to the client." +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -"C'est tout pour le client. 
Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"0.0.0:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons " -":code:`\"0.0.0:8080\"`. Si nous exécutons une charge de travail " -"véritablement fédérée avec le serveur et les clients s'exécutant sur des " -"machines différentes, tout ce qui doit changer est :code:`server_address`" -" que nous transmettons au client." -#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " -"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" -" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" -"learn." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-jax.rst:246 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-scikitlearn.rst:255 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#: ../../source/tutorial-quickstart-xgboost.rst:269 #, fuzzy -msgid "``server.py``, import Flower and start the server:" -msgstr ":code:`server.py`, importe Flower et démarre le serveur :" +msgid "The ServerApp" +msgstr "serveur" -#: ../../source/tutorial-quickstart-scikitlearn.rst:198 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" -"The number of federated learning rounds is set in ``fit_round()`` and the" -" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " -"is called after each federated learning round and gives you information " -"about loss and accuracy. Note that we also make use of Flower Datasets " -"here to load the test split of the MNIST dataset for server-side " -"evaluation." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -"Le nombre de tours d'apprentissage fédéré est défini dans " -":code:`fit_round()` et l'évaluation est définie dans " -":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " -"chaque tour d'apprentissage fédéré et te donne des informations sur la " -"perte et la précision." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:228 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" -"The ``main`` contains the server-side parameter initialization " -"``utils.set_initial_params()`` as well as the aggregation strategy " -"``fl.server.strategy:FedAvg()``. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -"Le :code:`main` contient l'initialisation des paramètres côté serveur " -":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " -":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " -"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" -" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" -" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." -#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." 
msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " -"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " -"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" -" commencer par lancer le serveur :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:264 -#: ../../source/tutorial-quickstart-xgboost.rst:598 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " -"dans différents terminaux. Ouvre un nouveau terminal et démarre le " -"premier client :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:271 -#: ../../source/tutorial-quickstart-xgboost.rst:605 -msgid "Open another terminal and start the second client:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" +#: ../../source/tutorial-quickstart-ios.rst:4 +#, fuzzy +msgid "Quickstart iOS" +msgstr "Démarrage rapide XGBoost" -#: ../../source/tutorial-quickstart-scikitlearn.rst:277 -#: ../../source/tutorial-quickstart-xgboost.rst:611 +#: ../../source/tutorial-quickstart-ios.rst:11 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"The experimental Flower iOS SDK is not compatible with the latest version" +" of Flower. iOS support is currently being reworked and will be released " +"in 2025." msgstr "" -"Chaque client aura son propre ensemble de données. 
Tu devrais maintenant " -"voir comment la formation se déroule dans le tout premier terminal (celui" -" qui a démarré le serveur) :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:311 -#, fuzzy +#: ../../source/tutorial-quickstart-ios.rst:14 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in ``examples/sklearn-logreg-" -"mnist``." +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new iOS SDK is released." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" -"mnist`." -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-quickstart-ios.rst:17 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a CNN model on CIFAR-10." +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" +"Dans ce tutoriel, nous allons apprendre, comment former un réseau " +"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." -#: ../../source/tutorial-quickstart-tensorflow.rst:4 -msgid "Quickstart TensorFlow" -msgstr "Démarrage rapide de TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#: ../../source/tutorial-quickstart-ios.rst:20 #, fuzzy msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " -"all, it is recommended to create a virtual environment and run everything" -" within a :doc:`virtualenv `." +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. 
For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-tensorflow.rst:11 +#: ../../source/tutorial-quickstart-ios.rst:25 +#, fuzzy msgid "" -"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " -"will generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." -#: ../../source/tutorial-quickstart-tensorflow.rst:26 +#: ../../source/tutorial-quickstart-ios.rst:28 +#, fuzzy msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``TensorFlow``), give a name to your project," -" and type in your developer name:" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." 
-#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-ios.rst:34 +#, fuzzy msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create the ``NumPy`` arrays that" -" correspond to their data partition." +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" -#: ../../source/tutorial-quickstart-tensorflow.rst:141 +#: ../../source/tutorial-quickstart-ios.rst:41 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:48 +msgid "Flower Client" +msgstr "Client de la fleur" + +#: ../../source/tutorial-quickstart-ios.rst:50 msgid "" -"Next, we need a model. We defined a simple Convolutional Neural Network " -"(CNN), but feel free to replace it with a more sophisticated model if " -"you'd like:" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:170 +#: ../../source/tutorial-quickstart-ios.rst:88 msgid "" -"With `TensorFlow`, we can use the built-in ``get_weights()`` and " -"``set_weights()`` functions, which simplifies the implementation with " -"`Flower`. 
The rest of the functionality in the ClientApp is directly " -"inspired by the centralized case. The ``fit()`` method in the client " -"trains the model using the local dataset. Similarly, the ``evaluate()`` " -"method is used to evaluate the model received on a held-out validation " -"set that the client might have:" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:203 +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:102 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparameters defined in your " -"``pyproject.toml`` to configure the run. For example, in this tutorial we" -" access the `local-epochs` setting to control the number of epochs a " -"``ClientApp`` will perform when running the ``fit()`` method, in addition" -" to `batch-size`. You could define additional hyperparameters in " -"``pyproject.toml`` and access them here." +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:234 +#: ../../source/tutorial-quickstart-ios.rst:120 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will serve as " -"the global model to federate." +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:270 -#, fuzzy +#: ../../source/tutorial-quickstart-ios.rst:126 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_tf_link|_ in the Flower GitHub repository." +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-tensorflow.rst:282 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"The video shown below shows how to setup a TensorFlow + Flower project " -"using our previously recommended APIs. A new video tutorial will be " -"released that shows the new APIs (as the content above does)" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-quickstart-ios.rst:149 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:4 -msgid "Quickstart XGBoost" -msgstr "Démarrage rapide XGBoost" +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "Flower Server" +msgstr "Serveur de Flower" -#: ../../source/tutorial-quickstart-xgboost.rst:13 +#: ../../source/tutorial-quickstart-ios.rst:158 #, fuzzy -msgid "Federated XGBoost" -msgstr "Formation fédérée" - -#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" msgstr "" +"Pour les charges de travail simples, nous pouvons démarrer un serveur " +"Flower et laisser toutes les possibilités de configuration à leurs " +"valeurs par défaut. 
Dans un fichier nommé :code:`server.py`, importe " +"Flower et démarre le serveur :" -#: ../../source/tutorial-quickstart-xgboost.rst:21 +#: ../../source/tutorial-quickstart-ios.rst:169 +msgid "Train the model, federated!" +msgstr "Entraîne le modèle, fédéré !" + +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout " +"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " +"généralement un serveur et plusieurs clients. Nous devons donc commencer " +"par démarrer le serveur :" -#: ../../source/tutorial-quickstart-xgboost.rst:25 -#, fuzzy -msgid "Why federated XGBoost?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" - -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-ios.rst:179 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:31 +#: ../../source/tutorial-quickstart-ios.rst:185 +#, fuzzy msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-xgboost.rst:36 +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:46 -msgid "Environment Setup" -msgstr "" +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "Démarrage rapide de JAX" -#: ../../source/tutorial-quickstart-xgboost.rst:48 +#: ../../source/tutorial-quickstart-jax.rst:6 #, fuzzy msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." 
+"In this federated learning tutorial we will learn how to train a linear " +"regression model using Flower and `JAX " +"`_. It is recommended to create a " +"virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-xgboost.rst:51 +#: ../../source/tutorial-quickstart-jax.rst:11 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"Let's use ``flwr new`` to create a complete Flower+JAX project. It will " +"generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_. A random " +"regression dataset will be loaded from scikit-learn's |makeregression|_ " +"function." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 -#, fuzzy -msgid "" -"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" -" go ahead and install ``xgboost``:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" - -#: ../../source/tutorial-quickstart-xgboost.rst:67 +#: ../../source/tutorial-quickstart-jax.rst:24 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``JAX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:71 -#, fuzzy +#: ../../source/tutorial-quickstart-jax.rst:116 msgid "" -"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " -"and other related functions:" -msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" - -#: ../../source/tutorial-quickstart-xgboost.rst:99 -msgid "Dataset partition and hyper-parameter selection" +"This tutorial uses scikit-learn's |makeregression|_ function to generate " +"a random regression problem." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:101 +#: ../../source/tutorial-quickstart-jax.rst:130 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"We defined a simple linear regression model to demonstrate how to create " +"a JAX model, but feel free to replace it with a more sophisticated JAX " +"model if you'd like, (such as with NN-based `Flax " +"`_):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:115 +#: ../../source/tutorial-quickstart-jax.rst:141 msgid "" -"In this example, we split the dataset into 30 partitions with uniform " -"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " -"partition for the given client based on ``partition_id``:" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``evaluation()``) using the above model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:135 +#: ../../source/tutorial-quickstart-jax.rst:172 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for ``xgboost`` package." 
+"The main changes we have to make to use JAX with Flower will be found in " +"the ``get_params()`` and ``set_params()`` functions. In ``get_params()``," +" JAX model parameters are extracted and represented as a list of NumPy " +"arrays. The ``set_params()`` function is the opposite: given a list of " +"NumPy arrays it applies them to an existing JAX model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:149 +#: ../../source/tutorial-quickstart-jax.rst:180 msgid "" -"The functions of ``train_test_split`` and " -"``transform_dataset_to_dmatrix`` are defined as below:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +"The ``get_params()`` and ``set_params()`` functions here are conceptually" +" similar to the ``get_weights()`` and ``set_weights()`` functions that we" +" defined in the :doc:`QuickStart PyTorch ` " +"tutorial." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:190 +#: ../../source/tutorial-quickstart-jax.rst:227 msgid "" -"The ``num_local_round`` represents the number of iterations for local " -"tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " -"metric." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:195 -msgid "Flower client definition for XGBoost" +#: ../../source/tutorial-quickstart-jax.rst:248 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the ``FedAvg`` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``input_dim`` is read from the run config. You can find the default value" +" defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:197 +#: ../../source/tutorial-quickstart-jax.rst:276 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define ``XgbClient`` class inherited from " -"``fl.client.Client``." +"Congratulations! You've successfully built and run your first federated " +"learning system for JAX with Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:219 +#: ../../source/tutorial-quickstart-jax.rst:281 +#, fuzzy msgid "" -"All required parameters defined above are passed to ``XgbClient``'s " -"constructor." +"Check the source code of the extended version of this tutorial in " +"|quickstart_jax_link|_ in the Flower GitHub repository." msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "Démarrage rapide de JAX" -#: ../../source/tutorial-quickstart-xgboost.rst:221 +#: ../../source/tutorial-quickstart-mlx.rst:6 +#, fuzzy msgid "" -"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " -"insides ``XgbClient`` class as follows." 
+"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-xgboost.rst:236 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use ``get_parameters`` " -"and ``set_parameters`` to initialise model parameters for XGBoost. As a " -"result, let's return an empty tensor in ``get_parameters`` when it is " -"called by the server at the first round." +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:278 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" -"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " -"first set of trees. From the second round, we load the global model sent " -"from server to new build Booster object, and then update model weights on" -" local training data with function ``local_boost`` as follows:" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:298 -msgid "" -"Given ``num_local_round``, we update trees by calling " -"``bst_input.update`` method. After training, the last " -"``N=num_local_round`` trees will be extracted to send to the server." 
+#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" -"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" -" function to conduct evaluation on valid set. The AUC value will be " -"returned." +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:333 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" -"Now, we can create an instance of our class ``XgbClient`` and add one " -"line to actually run this client:" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" -#: ../../source/tutorial-quickstart-xgboost.rst:350 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" -"That's it for the client. We only have to implement ``Client`` and call " -"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " -"client which server to connect to. In our case we can run the server and " -"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" -" run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" point the client at." +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. 
La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." -#: ../../source/tutorial-quickstart-xgboost.rst:360 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"We also define some utility functions to test our model and to iterate " +"over batches." msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-xgboost.rst:364 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" -"In a file named ``server.py``, import Flower and FedXgbBagging from " -"``flwr.server.strategy``." +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." 
msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés au MXNet :" -#: ../../source/tutorial-quickstart-xgboost.rst:367 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:401 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" -"We use two clients for this example. An ``evaluate_metrics_aggregation`` " -"function is defined to collect and wighted average the AUC values from " -"clients. The ``config_func`` function is to return the current FL round " -"number to client's ``fit()`` and ``evaluate()`` methods." +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:406 -#, fuzzy -msgid "Then, we start the server:" -msgstr "Démarrer le serveur" - -#: ../../source/tutorial-quickstart-xgboost.rst:418 -msgid "Tree-based bagging aggregation" +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:420 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"The rest of the functionality is directly inspired by the centralized " +"case. 
The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:422 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" -"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " -"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " -"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " -"``evaluate`` methods as follows:" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:519 -msgid "" -"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " -"trees by calling ``aggregate()`` function:" +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:579 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -"``_get_tree_nums``. Then, the fetched information will be aggregated. " -"After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:584 +#: ../../source/tutorial-quickstart-mlx.rst:277 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "Tout assembler" + +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." 
+"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:588 -msgid "Launch Federated XGBoost!" +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:664 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" "Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in ``metrics_distributed``." -" One can see that the average AUC increases over FL rounds." +"learning system." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:668 +#: ../../source/tutorial-quickstart-mlx.rst:390 #, fuzzy msgid "" -"The full `source code `_ for this example can be found in ``examples" -"/xgboost-quickstart``." +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. Le code source complet " @@ -26602,213 +26028,330 @@ msgstr "" "mxnet/client.py>`_ de cet exemple se trouve dans :code:`examples" "/quickstart-mxnet`." 
-#: ../../source/tutorial-quickstart-xgboost.rst:673 -msgid "Comprehensive Federated XGBoost" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:675 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:685 +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" + +#: ../../source/tutorial-quickstart-pandas.rst:11 #, fuzzy -msgid "Cyclic training" -msgstr "Formation centralisée" +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." -#: ../../source/tutorial-quickstart-xgboost.rst:687 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. 
The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 -msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +#: ../../source/tutorial-quickstart-pytorch.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-xgboost.rst:733 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" -"The customised ``ClientManager`` samples all available clients in each FL" -" round based on the order of connection to the server. Then, we define a " -"new strategy ``FedXgbCyclic`` in " -"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:775 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" -"Unlike the original ``FedAvg``, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding ``aggregate_fit``." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:778 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" -"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" -" ensure the clients to be sequentially selected given FL round:" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:840 -msgid "Customised data partitioning" +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:842 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" -"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " -"instantiate the data partitioner based on the given ``num_partitions`` " -"and ``partitioner_type``. Currently, we provide four supported " -"partitioner type to simulate the uniformity/non-uniformity in data " -"quantity (uniform, linear, square, exponential)." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. 
" +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:873 -#, fuzzy -msgid "Customised centralised/distributed evaluation" -msgstr "Évaluation centralisée" +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:875 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" -"To facilitate centralised evaluation, we define a function in " -"``server_utils.py``:" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:907 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" -"This function returns a evaluation function which instantiates a " -"``Booster`` object and loads the global model weights to it. 
The " -"evaluation is conducted by calling ``eval_set()`` method, and the tested " -"AUC value is reported." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:911 +#: ../../source/tutorial-quickstart-pytorch.rst:348 +#, fuzzy msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the ``evaluate()`` method insides the " -"``XgbClient`` class in ``client_utils.py``." +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-xgboost.rst:916 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 #, fuzzy -msgid "Flower simulation" -msgstr "Simulation de moniteur" +msgid "Video tutorial" +msgstr "Tutoriel" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "Démarrage rapide de PyTorch Lightning" -#: ../../source/tutorial-quickstart-xgboost.rst:918 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#, fuzzy msgid "" -"We also provide an example code (``sim.py``) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-xgboost.rst:954 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" -"After importing all required packages, we define a ``main()`` function to" -" perform the simulation process:" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1010 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a ``list``. After the simulation begins, the " -"clients won't need to pre-process their partitions again." +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." 
+" To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1014 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1065 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#, fuzzy msgid "" -"After that, we start the simulation by calling " -"``fl.simulation.start_simulation``:" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-xgboost.rst:1085 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"One of key parameters for ``start_simulation`` is ``client_fn`` which " -"returns a function to construct a client. We define it as follows:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1126 -msgid "Arguments parser" +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a Logistic" +" Regression on MNIST using Flower and scikit-learn. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." 
msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-xgboost.rst:1128 +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"In ``utils.py``, we define the arguments parsers for clients, server and " -"simulation, allowing users to specify different experimental settings. " -"Let's first see the sever side:" +"Let's use ``flwr new`` to create a complete Flower+scikit-learn project. " +"It will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_ The dataset " +"will be partitioned using |flowerdatasets|_'s |iidpartitioner|_" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1175 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with ``--centralised-eval``, the sever " -"will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``sklearn``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1180 -msgid "Then, the argument parser on client side:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `MNIST `_ dataset. In this" +" example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. Each ``ClientApp`` will call this function " +"to create dataloaders with the data that correspond to their data " +"partition." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1234 +#: ../../source/tutorial-quickstart-scikitlearn.rst:140 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting ``--centralised-eval``, as well as an option to perform " -"scaled learning rate based on the number of clients by setting " -"``--scaled-lr``." +"We define the |logisticregression|_ model from scikit-learn in the " +"``get_model()`` function:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1239 -msgid "We also have an argument parser for simulation:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +msgid "" +"To perform the training and evaluation, we will make use of the " +"``.fit()`` and ``.score()`` methods available in the " +"``LogisticRegression`` class." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1317 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 +msgid "" +"The main changes we have to make to use scikit-learn with Flower will be " +"found in the ``get_model_params()``, ``set_model_params()``, and " +"``set_initial_params()`` functions. In ``get_model_params()``, the " +"coefficients and intercept of the logistic regression model are extracted" +" and represented as a list of NumPy arrays. In ``set_model_params()``, " +"that's the opposite: given a list of NumPy arrays it applies them to an " +"existing ``LogisticRegression`` model. Finally, in " +"``set_initial_params()``, we initialize the model parameters based on the" +" MNIST dataset, which has 10 classes (corresponding to the 10 digits) and" +" 784 features (corresponding to the size of the MNIST image array, which " +"is 28 × 28). Doing this is fairly easy in scikit-learn." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1320 -#, fuzzy -msgid "Example commands" -msgstr "Exemples de PyTorch" +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1322 +#: ../../source/tutorial-quickstart-scikitlearn.rst:226 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"``context`` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1329 -#, fuzzy -msgid "Then, on each client terminal, we start the clients:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" +#: ../../source/tutorial-quickstart-scikitlearn.rst:257 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a zero-initialized model that will " +"server as the global model to be federated. Note that the values of " +"``num-server-rounds``, ``penalty``, and ``local-epochs`` are read from " +"the run config. You can find the default values defined in the " +"``pyproject.toml``." 
+msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1335 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:295 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#: ../../source/tutorial-quickstart-scikitlearn.rst:300 #, fuzzy msgid "" -"The full `code `_ for this comprehensive example can be found in" -" ``examples/xgboost-comprehensive``." +"Check the source code of the extended version of this tutorial in " +"|quickstart_sklearn_link|_ in the Flower GitHub repository." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. Le code source complet " @@ -26816,190 +26359,746 @@ msgstr "" "mxnet/client.py>`_ de cet exemple se trouve dans :code:`examples" "/quickstart-mxnet`." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -#, fuzzy -msgid "Build a strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and the Flower framework (`part 1 " -"`__) and we learned how strategies can be used to customize " -"the execution on both the server and the clients (`part 2 " -"`__)." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" -" Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__) " -"et nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et sur les clients " -"(`partie 2 `__)." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 #, fuzzy msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg using " -"the Flower framework, Flower Datasets, and PyTorch." +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -"Dans ce carnet, nous allons continuer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit précédemment en créant " -"une version personnalisée de FedAvg (encore une fois, en utilisant " -"`Flower `__ et `PyTorch `__)." +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Flower Discuss and the Flower Slack to connect, " -"ask questions, and get help: - `Join Flower Discuss " -"`__ We'd love to hear from you in the " -"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " -"Beginners``. - `Join Flower Slack `__ We'd " -"love to hear from you in the ``#introductions`` channel! If anything is " -"unclear, head over to the ``#questions`` channel." +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 -#, fuzzy -msgid "Let's build a new ``Strategy`` from scratch! 🌼" -msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 -msgid "Preparation" -msgstr "Préparation" +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -"Avant de commencer le code proprement dit, assurons-nous que nous " -"disposons de tout ce dont nous avons besoin." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 -msgid "Installing dependencies" -msgstr "Installation des dépendances" +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 -msgid "First, we install the necessary packages:" -msgstr "Tout d'abord, nous installons les paquets nécessaires :" +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." 
+msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -"Maintenant que toutes les dépendances sont installées, nous pouvons " -"importer tout ce dont nous avons besoin pour ce tutoriel :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#, fuzzy msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 -msgid "Data loading" -msgstr "Chargement des données" +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." 
+"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation) et enveloppons le tout dans " -"leur propre ``DataLoader``." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 -msgid "Model training/evaluation" -msgstr "Formation/évaluation du modèle" +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "Démarrage rapide XGBoost" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +#: ../../source/tutorial-quickstart-xgboost.rst:7 +msgid "XGBoost" +msgstr "XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:9 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." 
msgstr "" -"Continuons avec la définition habituelle du modèle (y compris " -"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " -"et de test :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 -msgid "Flower client" -msgstr "Client de Flower" +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#: ../../source/tutorial-quickstart-xgboost.rst:19 #, fuzzy +msgid "Why Federated XGBoost?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``partition_id`` to the client and use it log additional details. We then" -" create an instance of ``ClientApp`` and pass it the ``client_fn``." +"As the demand for data privacy and decentralized learning grows, there's " +"an increasing requirement to implement federated XGBoost systems for " +"specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" -"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " -"méthodes ``get_parameters``, ``fit`` et ``evaluate``. 
Ici, nous " -"transmettons également le ``cid`` au client et l'utilisons pour consigner" -" des détails supplémentaires :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 -msgid "Let's test what we have so far before we continue:" -msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data privacy" +" is paramount. Given the robustness and efficiency of XGBoost, combining " +"it with federated learning offers a promising solution for these specific" +" challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:33 +msgid "" +"In this tutorial, we learn how to train a federated XGBoost model on the " +"HIGGS dataset using Flower and the ``xgboost`` package to perform a " +"binary classification task. We use a simple example (`full code xgboost-" +"quickstart `_) to demonstrate how federated XGBoost works, and then we " +"dive into a more complex comprehensive example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:42 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within a :doc:`virtualenv `." +msgstr "" +"Il est recommandé de créer un environnement virtuel et de tout exécuter " +"dans ce `virtualenv `_." + +#: ../../source/tutorial-quickstart-xgboost.rst:45 +msgid "" +"We first need to install Flower and Flower Datasets. 
You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:52 +#, fuzzy +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +#, fuzzy +msgid "The Configurations" +msgstr "Valeurs de configuration" + +#: ../../source/tutorial-quickstart-xgboost.rst:62 +msgid "" +"We define all required configurations / hyper-parameters inside the " +"``pyproject.toml`` file:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:84 +msgid "" +"The ``local-epochs`` represents the number of iterations for local tree " +"boost. We use CPU for the training in default. One can assign it to a GPU" +" by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:91 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `HIGGS` dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:105 +msgid "" +"In this example, we split the dataset into 20 partitions with uniform " +"distribution (`IidPartitioner `_)." +" Then, we load the partition for the given client based on " +"``partition_id``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:110 +msgid "" +"Subsequently, we train/test split using the given partition (client's " +"local data), and reformat data to DMatrix for the ``xgboost`` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:124 +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:151 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. 
Let's first see how we define " +"Flower client for XGBoost. We follow the general rule to define " +"``FlowerClient`` class inherited from ``fl.client.Client``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:176 +msgid "" +"All required parameters defined above are passed to ``FlowerClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:178 +msgid "" +"Then, we override ``fit`` and ``evaluate`` methods insides " +"``FlowerClient`` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:217 +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``_local_boost`` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:237 +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:265 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:271 +#, fuzzy +msgid "" +"After the local training on clients, clients' model updates are sent to " +"the *server*, which aggregates them to produce a better model. Finally, " +"the *server* sends this improved model version back to each *client* to " +"complete a federated round." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. 
Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-xgboost.rst:275 +msgid "" +"In the file named ``server_app.py``, we define a strategy for XGBoost " +"bagging aggregation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:308 +msgid "" +"An ``evaluate_metrics_aggregation`` function is defined to collect and " +"wighted average the AUC values from clients. The ``config_func`` function" +" is to return the current FL round number to client's ``fit()`` and " +"``evaluate()`` methods." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:313 +msgid "Tree-based Bagging Aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:315 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:317 +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:414 +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:474 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:479 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by serialisation, and sending the global model back to each " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:483 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:533 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``History (metrics, " +"distributed, evaluate)``. One can see that the average AUC increases over" +" FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:547 +#, fuzzy +msgid "" +"Check the full `source code " +"`_ " +"for this example in ``examples/xgboost-quickstart`` in the Flower GitHub " +"repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-xgboost.rst:552 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:554 +msgid "" +"Now that you know how federated XGBoost works with Flower, it's time to " +"run some more comprehensive experiments by customising the experimental " +"settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised / distributed evaluation. Let's take a look!" 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:562 +#, fuzzy +msgid "Cyclic Training" +msgstr "Formation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:564 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:570 +msgid "To do this, we first customise a ``ClientManager`` in ``server_app.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:610 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to the " +"next client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:652 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:655 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Customised Data Partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In ``task.py``, we use the ``instantiate_fds`` function to instantiate " +"Flower Datasets and the data partitioner based on the given " +"``partitioner_type`` and ``num_partitions``. Currently, we provide four " +"supported partitioner type to simulate the uniformity/non-uniformity in " +"data quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:726 +#, fuzzy +msgid "Customised Centralised / Distributed Evaluation" +msgstr "Évaluation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:728 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_app.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:759 +msgid "" +"This function returns an evaluation function, which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:763 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_app.py``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:768 +#, fuzzy +msgid "Arguments Explainer" +msgstr "Amélioration de la documentation" + +#: ../../source/tutorial-quickstart-xgboost.rst:770 +msgid "" +"We define all hyper-parameters under ``[tool.flwr.app.config]`` entry in " +"``pyproject.toml``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:799 +msgid "" +"On the server side, we allow user to specify training strategies / FL " +"rounds / participating clients / clients for evaluation, and evaluation " +"fashion. Note that with ``centralised-eval = true``, the sever will do " +"centralised evaluation and all functionalities for client evaluation will" +" be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:804 +msgid "" +"On the client side, we can define various options for client data " +"partitioning. Besides, clients also have an option to conduct evaluation " +"on centralised test set by setting ``centralised-eval = true``, as well " +"as an option to perform scaled learning rate based on the number of " +"clients by setting ``scaled-lr = true``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:810 +#, fuzzy +msgid "Example Commands" +msgstr "Exemples de PyTorch" + +#: ../../source/tutorial-quickstart-xgboost.rst:812 +msgid "To run bagging aggregation for 5 rounds evaluated on centralised test set:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:818 +msgid "" +"To run cyclic training with linear partitioner type evaluated on " +"centralised test set:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:827 +#, fuzzy +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. 
Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-xgboost.rst:833 +#, fuzzy +msgid "Video Tutorial" +msgstr "Tutoriel" + +#: ../../source/tutorial-quickstart-xgboost.rst:837 +msgid "" +"The video shown below shows how to setup a XGBoost + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +#, fuzzy +msgid "Build a strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" +" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__) " +"et nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et sur les clients " +"(`partie 2 `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." 
+msgstr "" +"Dans ce carnet, nous allons continuer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit précédemment en créant " +"une version personnalisée de FedAvg (encore une fois, en utilisant " +"`Flower `__ et `PyTorch `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" 
+ +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "Préparation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "" +"Avant de commencer le code proprement dit, assurons-nous que nous " +"disposons de tout ce dont nous avons besoin." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "Installation des dépendances" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "Tout d'abord, nous installons les paquets nécessaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" +"Maintenant que toutes les dépendances sont installées, nous pouvons " +"importer tout ce dont nous avons besoin pour ce tutoriel :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: 
../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
+msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "Formation/évaluation du modèle" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" +"Continuons avec la définition habituelle du modèle (y compris " +"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " +"et de test :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Client de Flower" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
+msgstr "" +"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" +"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " +"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " +"transmettons également le ``cid`` au client et l'utilisons pour consigner" +" des détails supplémentaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 msgid "Build a Strategy from scratch" @@ -28276,7 +28375,6 @@ msgstr "" " tutoriel sur les fleurs couvrira l'évaluation centralisée." #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 msgid "Final remarks" msgstr "Remarques finales" @@ -28753,8 +28851,9 @@ msgstr "" "qui se rapprochent de l'état actuel de l'art dans le domaine." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#, fuzzy msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"🧑‍🏫 This tutorial starts from zero and expects no familiarity with " "federated learning. Only a basic understanding of data science and Python" " programming is assumed." msgstr "" @@ -28784,12 +28883,14 @@ msgid "Let's get started!" msgstr "Allons-y, déclarons-le !" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +#, fuzzy +msgid "Classical Machine Learning" msgstr "Apprentissage automatique classique" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#, fuzzy msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " +"Before we begin discussing federated learning, let us quickly recap how " "most machine learning works today." 
msgstr "" "Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " @@ -28807,7 +28908,7 @@ msgstr "" "chose d'autre, comme la régression linéaire classique." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|ac0a9766e26044d6aea222a829859b20|" +msgid "|80152fa658904be08c849b4a594b76e1|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -28826,7 +28927,7 @@ msgstr "" " Go." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgid "|35b60a1068f944ce937ac2988661aad5|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -28836,8 +28937,8 @@ msgstr "Entraîne le modèle à l'aide des données" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 #, fuzzy msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"In practice, the training data we work with doesn't originate on the " +"machine we train the model on." msgstr "" "Dans la pratique, les données d'entraînement avec lesquelles nous " "travaillons ne proviennent pas de la machine sur laquelle nous entraînons" @@ -28846,7 +28947,8 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 #, fuzzy msgid "" -"It originates on a smartphone by the user interacting with an app, a car " +"This data gets created \"somewhere else\". For instance, the data can " +"originate on a smartphone by the user interacting with an app, a car " "collecting sensor data, a laptop receiving input via the keyboard, or a " "smart speaker listening to someone trying to sing a song." msgstr "" @@ -28857,7 +28959,7 @@ msgstr "" "chanson." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgid "|efead7f2c2224b60b7b42705004c15e6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -28878,7 +28980,7 @@ msgstr "" " données pour la même tâche." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgid "|5421fee4e7ed450c903cbcd8a9d8a5d4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -28886,11 +28988,12 @@ msgid "Data is on many devices" msgstr "Les données se trouvent sur de nombreux appareils" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#, fuzzy msgid "" "So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"that has been used in the past was to collect all this data on a central " +"server. This server can be located somewhere in a data center, or " +"somewhere in the cloud." msgstr "" "Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " "d'analyse de données, l'approche utilisée par le passé consistait à " @@ -28899,7 +29002,7 @@ msgstr "" "cloud." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgid "|811fcf35e9214bd5b4e613e41f7c0a27|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -28920,7 +29023,7 @@ msgstr "" "appuyés." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgid "|e61d38b0948f4c07a7257755f3799b54|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -28932,11 +29035,12 @@ msgid "Challenges of classical machine learning" msgstr "Les défis de l'apprentissage automatique classique" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#, fuzzy msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." +"This classical machine learning approach we've just seen can be used in " +"some cases. Great examples include categorizing holiday photos, or " +"analyzing web traffic. Cases, where all the data is naturally available " +"on a centralized server." msgstr "" "L'approche classique de l'apprentissage automatique que nous venons de " "voir peut être utilisée dans certains cas. Parmi les grands exemples, on " @@ -28945,7 +29049,7 @@ msgstr "" " sur un serveur centralisé." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|5405ed430e4746e28b083b146fb71731|" +msgid "|e82c29351e2e480087c61b939eb7c041|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -28964,7 +29068,7 @@ msgstr "" "suffisantes pour former un bon modèle." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|a389e87dab394eb48a8949aa2397687b|" +msgid "|21ca40f4fb1a405c89098fd1d24880a4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -28974,7 +29078,7 @@ msgstr "Impossible de centraliser" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 #, fuzzy msgid "" -"There are many reasons why the classic centralized machine learning " +"There are many reasons why the classical centralized machine learning " "approach does not work for a large number of highly important real-world " "use cases. Those reasons include:" msgstr "" @@ -28992,9 +29096,9 @@ msgid "" "(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " "protect sensitive data from being moved. In fact, those regulations " "sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"users' data for machine learning training because those users live in " +"different parts of the world, and their data is governed by different " +"data protection regulations." msgstr "" "**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), " "LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), " @@ -29059,27 +29163,30 @@ msgstr "" #, fuzzy msgid "" "Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"detection models." msgstr "" "Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " "former des modèles de détection du cancer" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#, fuzzy msgid "" "Financial information from different organizations to detect financial " -"fraud" +"fraud." 
msgstr "" "Informations financières provenant de différentes organisations pour " "détecter les fraudes financières" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#, fuzzy +msgid "Location data from your electric car to make better range prediction." msgstr "" "Les données de localisation de ta voiture électrique pour mieux prédire " "l'autonomie" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#, fuzzy +msgid "End-to-end encrypted messages to train better auto-complete models." msgstr "" "Messages cryptés de bout en bout pour former de meilleurs modèles " "d'autocomplétion" @@ -29107,15 +29214,16 @@ msgstr "" "matière d'IA." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +#, fuzzy +msgid "Federated Learning" msgstr "Apprentissage fédéré" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#, fuzzy msgid "" -"Federated learning simply reverses this approach. It enables machine " +"Federated Learning simply reverses this approach. It enables machine " "learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"of moving the data to the training. Here's a one-liner explanation:" msgstr "" "L'apprentissage fédéré inverse simplement cette approche. Il permet " "l'apprentissage automatique sur des données distribuées en déplaçant la " @@ -29123,25 +29231,29 @@ msgstr "" "formation. 
Voici l'explication en une seule phrase :" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +#, fuzzy +msgid "Centralized machine learning: move the data to the computation" msgstr "Apprentissage automatique central : déplace les données vers le calcul" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +#, fuzzy +msgid "Federated (machine) Learning: move the computation to the data" msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#, fuzzy msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"By doing so, Federated Learning enables us to use machine learning (and " +"other data science approaches) in areas where it wasn't possible before. " +"We can now train excellent medical AI models by enabling different " +"hospitals to work together. We can solve financial fraud by training AI " +"models on the data of different financial institutions. 
We can build " +"novel privacy-enhancing applications (such as secure messaging) that have" +" better built-in AI than their non-privacy-enhancing alternatives. And " +"those are just a few of the examples that come to mind. As we deploy " +"Federated Learning, we discover more and more areas that can suddenly be " +"reinvented because they now have access to vast amounts of previously " +"inaccessible data." msgstr "" "Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et " "d'autres approches de science des données) dans des domaines où cela " @@ -29159,8 +29271,9 @@ msgstr "" "données auparavant inaccessibles." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#, fuzzy msgid "" -"So how does federated learning work, exactly? Let's start with an " +"So how does Federated Learning work, exactly? Let's start with an " "intuitive explanation." msgstr "" "Comment fonctionne l'apprentissage fédéré ? Commençons par une " @@ -29186,7 +29299,7 @@ msgstr "" "partir d'un point de contrôle précédemment sauvegardé." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|89c412136a5146ec8dc32c0973729f12|" +msgid "|1351a2629c2c46d981b13b19f9fa45f0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -29206,10 +29319,10 @@ msgstr "" msgid "" "Next, we send the parameters of the global model to the connected client " "nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"organizations). This is to ensure that each participating node starts its" +" local training using the same model parameters. We often use only a few " +"of the connected nodes instead of all nodes. 
The reason for this is that " +"selecting more and more client nodes has diminishing returns." msgstr "" "Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " "connectés (par exemple, les appareils périphériques comme les smartphones" @@ -29221,7 +29334,7 @@ msgstr "" "rendements décroissants." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgid "|124c2c188b994c7ab1c862cfdb326923|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -29254,7 +29367,7 @@ msgstr "" "données locales, ou même de quelques étapes (mini-batchs)." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgid "|42e1951c36f2406e93c7ae0ec5b299f9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -29285,7 +29398,7 @@ msgstr "" " l'entraînement local." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgid "|ec637b8a84234d068995ee1ccb2dd3b1|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -29314,11 +29427,12 @@ msgstr "" "des données de l'ensemble des 100 nœuds clients ?" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#, fuzzy msgid "" "In order to get one single model, we have to combine all the model " "updates we received from the client nodes. This process is called " "*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +" way is called *Federated Averaging* (`McMahan et al., 2016 " "`__), often abbreviated as *FedAvg*. " "*FedAvg* takes the 100 model updates and, as the name suggests, averages " "them. To be more precise, it takes the *weighted average* of the model " @@ -29344,7 +29458,7 @@ msgstr "" "times as much as each of the 100 examples." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgid "|5bceb9d16b1a4d2db18d8a5b2f0cacb3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -29426,7 +29540,8 @@ msgstr "" " fédéré." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" +#, fuzzy +msgid "Federated Analytics" msgstr "Analyses fédérées" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 @@ -29487,7 +29602,7 @@ msgstr "" "quel cadre de ML et n'importe quel langage de programmation." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|55472eef61274ba1b739408607e109df|" +msgid "|502b10044e864ca2b15282a393ab7faf|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -29498,6 +29613,11 @@ msgstr "" "Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " "scooter, ordinateur personnel, roomba et téléphone)" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +#, fuzzy +msgid "Final Remarks" +msgstr "Remarques finales" + #: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" "Congratulations, you just learned the basics of federated learning and " @@ -29906,6 +30026,9 @@ msgstr "" #~ " is provided, then `start_server` will " #~ "create one." #~ msgstr "" +#~ "Déclasser `flwr.server.strategy.DefaultStrategy` (migrer" +#~ " vers `flwr.server.strategy.FedAvg`, qui est " +#~ "équivalent)" #~ msgid "" #~ "Currently supported values are `num_rounds`" @@ -29919,6 +30042,9 @@ msgstr "" #~ "strategy is provided, then `start_server` " #~ "will use `flwr.server.strategy.FedAvg`." 
#~ msgstr "" +#~ "Déclasser `flwr.server.strategy.DefaultStrategy` (migrer" +#~ " vers `flwr.server.strategy.FedAvg`, qui est " +#~ "équivalent)" #~ msgid "" #~ "An implementation of the abstract base" @@ -29927,6 +30053,9 @@ msgstr "" #~ "`start_simulation` will use " #~ "`flwr.server.client_manager.SimpleClientManager`." #~ msgstr "" +#~ "Déclasser `flwr.server.strategy.DefaultStrategy` (migrer" +#~ " vers `flwr.server.strategy.FedAvg`, qui est " +#~ "équivalent)" #~ msgid "" #~ "Optional dictionary containing arguments for" @@ -30537,15 +30666,13 @@ msgstr "" #~ " doit être installé sur le système." #~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" +#~ msgstr "LLaMA 2 fine-tuning avec Hugging Face et PyTorch" #~ msgid "XGBoost" #~ msgstr "XGBoost" #~ msgid "Android ONNX on-device training" -#~ msgstr "" -#~ "Utiliser Android ONNX pour faire du " -#~ "training directement sur le téléphone" +#~ msgstr "Training sur téléphone à l'aide d'Android ONNX" #~ msgid "Contribute on GitHub" #~ msgstr "Contribuer sur GitHub" @@ -30680,13 +30807,6 @@ msgstr "" #~ " information by default following a " #~ "standard message format:" #~ msgstr "" -#~ "L'enregistreur de Flower garde la trace" -#~ " de tous les événements principaux " -#~ "qui ont lieu dans les charges de" -#~ " travail de l'apprentissage fédéré. 
Il " -#~ "présente les informations par défaut en" -#~ " suivant un format de message " -#~ "standard :" #~ msgid "" #~ "containing relevant information including: log" @@ -30700,7 +30820,7 @@ msgstr "" #~ msgstr "" #~ msgid "Saving log to file" -#~ msgstr "Enregistrement du journal dans un fichier" +#~ msgstr "" #~ msgid "" #~ "By default, the Flower log is " @@ -30741,7 +30861,7 @@ msgstr "" #~ ":code:`identifier` :" #~ msgid "Log your own messages" -#~ msgstr "Enregistrer tes propres messages" +#~ msgstr "Loggez vos propres messages" #~ msgid "" #~ "You might expand the information shown" @@ -30750,12 +30870,6 @@ msgstr "" #~ " your application. You can achieve " #~ "this easily as follows." #~ msgstr "" -#~ "Tu peux élargir les informations " -#~ "affichées par défaut avec le logger " -#~ "Flower en ajoutant d'autres messages " -#~ "pertinents pour ton application. Tu peux" -#~ " y parvenir facilement en procédant " -#~ "comme suit." #~ msgid "" #~ "In this way your logger will show," @@ -30763,13 +30877,9 @@ msgstr "" #~ " the ones introduced by the clients" #~ " as specified above." #~ msgstr "" -#~ "De cette façon, ton logger affichera," -#~ " en plus des messages par défaut, " -#~ "ceux introduits par les clients comme" -#~ " spécifié ci-dessus." #~ msgid "Log to a remote service" -#~ msgstr "Se connecter à un service distant" +#~ msgstr "" #~ msgid "" #~ "The :code:`fl.common.logger.configure` function, " @@ -31033,9 +31143,6 @@ msgstr "" #~ "pytorch>`_: 100 clients collaboratively train" #~ " a CNN model on MNIST." #~ msgstr "" -#~ "`Quickstart PyTorch (Code) " -#~ "`_" #~ msgid "" #~ "Flower's :code:`VirtualClientEngine` allows you " @@ -34830,465 +34937,2826 @@ msgstr "" #~ "scripts qui font partie de ce " #~ "guide." -#~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." 
+#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "" +#~ "Nous allons maintenant montrer comment " +#~ "écrire un serveur qui utilise les " +#~ "scripts générés précédemment." + +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." +#~ msgstr "" +#~ "Lorsqu'il fournit des certificats, le " +#~ "serveur attend un tuple de trois " +#~ "certificats. :code:`Path` peut être utilisé" +#~ " pour lire facilement le contenu de" +#~ " ces fichiers en chaînes d'octets, ce" +#~ " qui est le type de données " +#~ "attendu par :code:`start_server`." + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." +#~ msgstr "" + +#~ msgid "Flower server" +#~ msgstr "Serveur de Flower" + +#~ msgid "" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." 
+#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." +#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." 
+#~ msgstr "" + +#~ msgid "" +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." +#~ msgstr "" + +#~ msgid "Using a different Flower or Python version" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgstr "" + +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" + +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ 
":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "Start a Flower Driver API server." +#~ msgstr "Tout d'abord, démarre un serveur Flower :" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" +#~ "Flower 1.0 : ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" + +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "" + +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." +#~ msgstr "" + +#~ msgid "start\\_driver" +#~ msgstr "start_client" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." 
+#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "The Driver object to use." +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Simulation Engine from the CLI." +#~ msgstr "" + +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "" + +#~ msgid "Quickstart MXNet" +#~ msgstr "Démarrage rapide de MXNet" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un modèle :code:`Sequential` " +#~ "sur MNIST à l'aide de Flower et" +#~ " de MXNet." + +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." 
+#~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, lançons une formation " +#~ "distribuée simple avec deux clients et" +#~ " un serveur. Notre procédure de " +#~ "formation et l'architecture du réseau " +#~ "sont basées sur le tutoriel de " +#~ "reconnaissance de chiffres écrits à la" +#~ " main du MXNet " +#~ "`_." + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "au MXNet :" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." +#~ msgstr "" +#~ "Nous utilisons MXNet pour charger MNIST," +#~ " un ensemble de données de " +#~ "classification d'images populaire de chiffres" +#~ " manuscrits pour l'apprentissage automatique. " +#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " +#~ "télécharge les données d'entraînement et " +#~ "de test." + +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "" +#~ "Définis l'entraînement et la perte avec" +#~ " MXNet. Nous entraînons le modèle en" +#~ " parcourant en boucle l'ensemble des " +#~ "données, nous mesurons la perte " +#~ "correspondante et nous l'optimisons." + +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." 
+#~ msgstr "" +#~ "Ensuite, nous définissons la validation " +#~ "de notre modèle d'apprentissage automatique." +#~ " Nous effectuons une boucle sur " +#~ "l'ensemble de test et mesurons à " +#~ "la fois la perte et la précision" +#~ " sur l'ensemble de test." + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "" +#~ "Après avoir défini la formation et " +#~ "le test d'un modèle d'apprentissage " +#~ "automatique MXNet, nous utilisons ces " +#~ "fonctions pour mettre en œuvre un " +#~ "client Flower." + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" + +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." +#~ msgstr "" +#~ "Après avoir chargé l'ensemble de données" +#~ " avec :code:`load_data()`, nous effectuons " +#~ "une propagation vers l'avant pour " +#~ "initialiser le modèle et les paramètres" +#~ " du modèle avec :code:`model(init)`. " +#~ "Ensuite, nous implémentons un client " +#~ "Flower." + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "l'implémentation de l'interface :code:`Client` " +#~ "lorsque ta charge de travail utilise " +#~ "MXNet. 
L'implémentation de :code:`NumPyClient` " +#~ "signifie généralement la définition des " +#~ "méthodes suivantes (:code:`set_parameters` est " +#~ "cependant facultatif) :" + +#~ msgid "They can be implemented in the following way:" +#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`MNISTClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()` ou " +#~ ":code:`fl.client.start_numpy_client()`. La chaîne " +#~ ":code:`\"0.0.0.0:8080\"` indique au client à " +#~ "quel serveur se connecter. Dans notre" +#~ " cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"0.0.0.0:8080\"`. Si nous exécutons une" +#~ " charge de travail véritablement fédérée" +#~ " avec le serveur et les clients " +#~ "s'exécutant sur des machines différentes, " +#~ "tout ce qui doit changer est " +#~ ":code:`server_address` que nous transmettons " +#~ "au client." 
+ +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "" +#~ "Le client et le serveur étant " +#~ "prêts, nous pouvons maintenant tout " +#~ "exécuter et voir l'apprentissage fédéré " +#~ "en action. Les systèmes d'apprentissage " +#~ "fédéré ont généralement un serveur et" +#~ " plusieurs clients. Nous devons donc " +#~ "commencer par démarrer le serveur :" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-mxnet`." + +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" + +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" + +#~ msgid "Shuffles data and its label" +#~ msgstr "Mélange les données et leur étiquette" + +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." 
+#~ msgstr "" +#~ "Nous chargeons l'ensemble de données " +#~ "MNIST de `OpenML `_," +#~ " un ensemble de données de " +#~ "classification d'images populaires de chiffres" +#~ " manuscrits pour l'apprentissage automatique. " +#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " +#~ "les données d'entraînement et de test." +#~ " L'ensemble d'entraînement est ensuite " +#~ "divisé en 10 partitions avec " +#~ ":code:`utils.partition()`." + +#~ msgid "Let's get stated!" +#~ msgstr "Allons-y, déclarons-le !" + +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgstr "" + +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" + +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" + +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" + +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgstr "" + +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" + +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" + +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" + +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" + +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" + +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgstr "" + +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" + +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" + +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub `_" +#~ " that include all necessary dependencies" +#~ " for running the SuperLink. You can" +#~ " also build your own custom docker" +#~ " images from scratch with a different" +#~ " version of Python or Ubuntu if " +#~ "that is what you need. In this " +#~ "guide, we will explain what images " +#~ "exist and how to build them " +#~ "locally." +#~ msgstr "" + +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. 
The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" + +#~ msgid "" +#~ "Both, base and SuperLink image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. " +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." +#~ msgstr "" + +#~ msgid "``3.11``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``UBUNTU_VERSION``" +#~ msgstr "" + +#~ msgid "Version of the official Ubuntu Docker image." +#~ msgstr "" + +#~ msgid "Defaults to ``22.04``." +#~ msgstr "" + +#~ msgid "" +#~ "The following example creates a base " +#~ "image with Python 3.11.0, pip 23.0.1 " +#~ "and setuptools 69.0.2:" +#~ msgstr "" + +#~ msgid "Building the SuperLink image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "" + +#~ msgid "The Python version of the base image." +#~ msgstr "Évaluer la réponse d'un client." + +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "" + +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "" + +#~ msgid "The PyPI package to install." +#~ msgstr "" + +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Flux de travail" + +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink image with the official Flower" +#~ " base image py3.11-ubuntu22.04 and Flower" +#~ " 1.8.0:" +#~ msgstr "" + +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. 
Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY``, " +#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " +#~ "arguments." +#~ msgstr "" + +#~ msgid "Creating New Messages" +#~ msgstr "Création de nouveaux messages" + +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "" +#~ "Voici un guide simple pour créer " +#~ "un nouveau type de message entre " +#~ "le serveur et les clients dans " +#~ "Flower." + +#~ msgid "" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." +#~ msgstr "" +#~ "Supposons que nous ayons les fonctions" +#~ " suivantes dans :code:`server.py` et " +#~ ":code:`numpy_client.py`..." + +#~ msgid "Server's side:" +#~ msgstr "Côté serveur :" + +#~ msgid "Client's side:" +#~ msgstr "Côté client :" + +#~ msgid "" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "" +#~ "Voyons maintenant ce que nous devons " +#~ "mettre en œuvre pour que cette " +#~ "simple fonction entre le serveur et " +#~ "le client fonctionne !" + +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "Types de messages pour les tampons de protocole" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." 
+#~ msgstr "" +#~ "La première chose à faire est de" +#~ " définir un type de message pour " +#~ "le système RPC dans :code:`transport.proto`." +#~ " Notez que nous devons le faire " +#~ "à la fois pour les messages de " +#~ "demande et de réponse. Pour plus " +#~ "de détails sur la syntaxe de " +#~ "proto3, veuillez consulter la `documentation" +#~ " officielle `_." + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "Dans le bloc :code:`ServerMessage` :" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "Dans le bloc ClientMessage :" + +#~ msgid "" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "" +#~ "Veille à ajouter également un champ " +#~ "du type de message nouvellement créé " +#~ "dans :code:`oneof msg`." + +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" + +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" + +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "Fonctions de sérialisation et de désérialisation" + +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." +#~ msgstr "" +#~ "La prochaine étape consiste à ajouter" +#~ " des fonctions pour sérialiser et " +#~ "désérialiser les types de données Python" +#~ " vers ou à partir des types de" +#~ " messages RPC définis. Tu dois " +#~ "ajouter ces fonctions dans :code:`serde.py`." 
+ +#~ msgid "The four functions:" +#~ msgstr "Les quatre fonctions :" + +#~ msgid "Sending the Message from the Server" +#~ msgstr "Envoi du message à partir du serveur" + +#~ msgid "" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "" +#~ "Écris maintenant la fonction de demande" +#~ " dans ta classe Client Proxy (par " +#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" +#~ " les fonctions serde que tu viens " +#~ "de créer :" + +#~ msgid "Receiving the Message by the Client" +#~ msgstr "Réception du message par le client" + +#~ msgid "" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" +#~ msgstr "" +#~ "Dernière étape ! Modifie le code " +#~ "dans :code:`message_handler.py` pour vérifier " +#~ "le champ de ton message et appeler" +#~ " la fonction :code:`example_response`. N'oublie" +#~ " pas d'utiliser les fonctions serde !" + +#~ msgid "Within the handle function:" +#~ msgstr "Dans le cadre de la fonction de poignée :" + +#~ msgid "And add a new function:" +#~ msgstr "Et ajoute une nouvelle fonction :" + +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "" +#~ "Avec un peu de chance, lorsque tu" +#~ " exécuteras ton programme, tu obtiendras" +#~ " le résultat escompté !" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. 
In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. 
This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." +#~ msgstr "" + +#~ msgid "" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." +#~ msgstr "" + +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" + +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower server (Fleet API)." +#~ msgstr "flower-fleet-api" + +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "" + +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "" + +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "" + +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "" + +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "" + +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "" + +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "" + +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "" + +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "" + +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "" + +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "" + +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "" + +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "" + +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "" + +#~ msgid "Edge Client Engine" +#~ msgstr "Moteur client Edge" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ architecture de " 
+#~ "base avec Edge Client Engine" + +#~ msgid "Virtual Client Engine" +#~ msgstr "Moteur de client virtuel" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "" +#~ "`Flower `_ architecture de " +#~ "base avec moteur de client virtuel" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "" +#~ "Moteur client virtuel et moteur client" +#~ " Edge dans la même charge de " +#~ "travail" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ architecture de " +#~ "base avec un moteur de client " +#~ "virtuel et un moteur de client " +#~ "périphérique" + +#~ msgid "How to build Docker Flower images locally" +#~ msgstr "" + +#~ msgid "Clone the flower repository." +#~ msgstr "**Fourche le dépôt de Flower**" + +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" + +#~ msgid "``22.04``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``23.0.1``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" + +#~ msgid "" +#~ "The following example creates a base " +#~ "Ubuntu/Alpine image with Python 3.11.0, " +#~ "pip 23.0.1, setuptools 69.0.2 and Flower" +#~ " 1.8.0:" +#~ msgstr "" + +#~ msgid "" +#~ "The name of image is ``flwr_base`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." 
+#~ msgstr "" + +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "" + +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "build argument." +#~ msgstr "" + +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" + +#~ msgid "**Via the UI**" +#~ msgstr "**Review the PR**" + +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" + +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" +#~ msgstr "" + +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "" + +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." 
+#~ msgstr "" + +#~ msgid "Preliminarities" +#~ msgstr "" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "Exemple : JAX - Exécuter JAX Federated" + +#~ msgid "" +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" + +#~ msgid ":doc:`How to run Flower using Docker `" +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" + +#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgstr "" + +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" + +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" + +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" + +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" + +#~ msgid "Quickstart" +#~ msgstr "Démarrage rapide de JAX" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. 
" +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "" + +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. 
If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." 
+#~ msgstr "" + +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." +#~ msgstr "" + +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" + +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" + +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" + +#~ msgid "Let's assume the following project layout:" +#~ msgstr "" + +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." 
+#~ msgstr "" + +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "" + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." +#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." 
+#~ msgstr "" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" + +#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgstr "" + +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "" + +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" + +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "" + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." 
+#~ msgstr "" + +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" + +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "" + +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "" + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" + +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." 
+#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "" + +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" + +#~ msgid "Run with root user privileges" +#~ msgstr "" + +#~ msgid "" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. 
" +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" + +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" + +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" + +#~ msgid "Using a different Flower version" +#~ msgstr "" + +#~ msgid "Pinning a Docker image to a specific version" +#~ msgstr "" + +#~ msgid "" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." +#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgstr "" + +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.client `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.common `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.simulation `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr "serveur.stratégie.Stratégie" + +#~ msgid ":py:obj:`Context `\\ \\(state\\)" +#~ msgstr "" + +#~ msgid "State of your run." 
+#~ msgstr "" + +#~ msgid "Metrics record." +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" +#~ msgstr "" + +#~ msgid "Remove all items from R." +#~ msgstr "" + +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" + +#~ msgid "d defaults to None." +#~ msgstr "" + +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" + +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" + +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "" + +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr "" + +#~ msgid "An identifier telling which data partition a ClientApp should use." 
+#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" +#~ msgstr "" + +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. " +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." +#~ msgstr "" + +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "flower-fleet-api" + +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" +#~ msgstr "" + +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" + +#~ msgid "key shares." +#~ msgstr "" + +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. 
Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" + +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à entraîner un réseau neuronal " +#~ "convolutif sur CIFAR10 à l'aide de " +#~ "Flower et PyTorch." + +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." 
+#~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locales. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." + +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "générale de ce qui se passe, " +#~ "commençons. Nous devons d'abord installer " +#~ "Flower. Tu peux le faire en " +#~ "exécutant :" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, allons-y et installons " +#~ "PyTorch et la bibliothèque **torchvision** " +#~ ":" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." +#~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons une " +#~ "formation distribuée simple avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " de formation et l'architecture de " +#~ "notre réseau sont basées sur `Deep " +#~ "Learning with PyTorch " +#~ "`_" +#~ " de PyTorch." 
+ +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "à PyTorch :" + +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "" +#~ "En outre, nous définissons l'attribution " +#~ "des appareils dans PyTorch avec :" + +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." +#~ msgstr "" +#~ "Nous utilisons PyTorch pour charger " +#~ "CIFAR10, un ensemble de données de " +#~ "classification d'images colorées populaire " +#~ "pour l'apprentissage automatique. Le " +#~ ":code:`DataLoader()` de PyTorch télécharge les" +#~ " données d'entraînement et de test " +#~ "qui sont ensuite normalisées." + +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" +#~ "Définis la perte et l'optimiseur avec" +#~ " PyTorch L'entraînement de l'ensemble de" +#~ " données se fait en bouclant sur " +#~ "l'ensemble de données, en mesurant la" +#~ " perte correspondante et en l'optimisant." + +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "" +#~ "Définis ensuite la validation du réseau" +#~ " d'apprentissage automatique. Nous passons " +#~ "en boucle sur l'ensemble de test " +#~ "et mesurons la perte et la " +#~ "précision de l'ensemble de test." 
+ +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "" +#~ "Après avoir défini l'entraînement et le" +#~ " test d'un modèle d'apprentissage " +#~ "automatique PyTorch, nous utilisons les " +#~ "fonctions pour les clients Flower." + +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "" +#~ "Les clients de Flower utiliseront un " +#~ "CNN simple adapté de \"PyTorch : A" +#~ " 60 Minute Blitz\" :" + +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "" +#~ "Après avoir chargé l'ensemble des " +#~ "données avec :code:`load_data()`, nous " +#~ "définissons l'interface Flower." + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise PyTorch. Mettre en œuvre" +#~ " :code:`NumPyClient` signifie généralement " +#~ "définir les méthodes suivantes " +#~ "(:code:`set_parameters` est cependant facultatif)" +#~ " :" + +#~ msgid "which can be implemented in the following way:" +#~ msgstr "qui peut être mis en œuvre de la manière suivante :" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" +#~ "Félicitations ! 
Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-pytorch`." + +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." +#~ msgstr "" + +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." 
+#~ msgstr "" + +#~ msgid "" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" +#~ msgstr "" + +#~ msgid "Implementing a Flower client" +#~ msgstr "Mise en place d'un client Flower" + +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" +#~ msgstr "" +#~ "Pour mettre en œuvre le client " +#~ "Flower, nous créons une sous-classe " +#~ "de ``flwr.client.NumPyClient`` et mettons en" +#~ " œuvre les trois méthodes " +#~ "``get_parameters``, ``fit`` et ``evaluate`` :" + +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." +#~ msgstr "" +#~ "La fonction ``start_simulation`` accepte un" +#~ " certain nombre d'arguments, parmi lesquels" +#~ " le ``client_fn`` utilisé pour créer " +#~ "les instances ``FlowerClient``, le nombre " +#~ "de clients à simuler (``num_clients``), " +#~ "le nombre de tours d'apprentissage " +#~ "fédéré (``num_rounds``), et la stratégie. " +#~ "La stratégie encapsule l'approche/algorithme " +#~ "d'apprentissage fédéré, par exemple, " +#~ "*Federated Averaging* (FedAvg)." 
+ +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "" +#~ "La seule chose qui reste à faire" +#~ " est d'indiquer à la stratégie " +#~ "d'appeler cette fonction chaque fois " +#~ "qu'elle reçoit des dictionnaires de " +#~ "métriques d'évaluation de la part des" +#~ " clients :" + +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "" + +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "" + +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "" + +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "" + +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "" + +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "" + +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "" + +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "" + +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "" + +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "" + +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "" + +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ "Nous allons maintenant montrer comment " -#~ "écrire un serveur qui utilise les " -#~ "scripts générés précédemment." -#~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ "Lorsqu'il fournit des certificats, le " -#~ "serveur attend un tuple de trois " -#~ "certificats. :code:`Path` peut être utilisé" -#~ " pour lire facilement le contenu de" -#~ " ces fichiers en chaînes d'octets, ce" -#~ " qui est le type de données " -#~ "attendu par :code:`start_server`." 
-#~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid "Flower server" -#~ msgstr "Serveur de Flower" +#~ msgid "run\\_client\\_app" +#~ msgstr "client" -#~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" #~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ msgid "the string key as the query for the layout." 
#~ msgstr "" -#~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." +#~ msgid "Corresponding layout based on the query." #~ msgstr "" +#~ msgid "run\\_server\\_app" +#~ msgstr "serveur" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." 
#~ msgstr "" -#~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ msgid "The total number of clients in this simulation." #~ msgstr "" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ "Optionally specify the type of actor " +#~ "to use. 
The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "Let's build a federated learning system using fastai and Flower!" #~ msgstr "" - -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" - -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" 
#~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en savoir plus." #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " à l'aide des transformateurs Hugging " +#~ "Face et de Flower !" + +#~ msgid "Dependencies" +#~ msgstr "Dépendances" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. 
This can be done " +#~ "using :code:`pip`:" #~ msgstr "" +#~ "Pour suivre ce tutoriel, tu devras " +#~ "installer les paquets suivants : " +#~ ":code:`datasets`, :code:`evaluate`, :code:`flwr`, " +#~ ":code:`torch`, et :code:`transformers`. Cela " +#~ "peut être fait en utilisant :code:`pip`" +#~ " :" -#~ msgid "Run Flower server (Driver API and Fleet API)." -#~ msgstr "" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "Flux de travail standard pour le visage" + +#~ msgid "Handling the data" +#~ msgstr "Traitement des données" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" +#~ "Pour récupérer le jeu de données " +#~ "IMDB, nous utiliserons la bibliothèque " +#~ ":code:`datasets` de Hugging Face. Nous " +#~ "devons ensuite tokeniser les données et" +#~ " créer des :code:`PyTorch` dataloaders, ce" +#~ " qui est fait dans la fonction " +#~ ":code:`load_data` :" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "Tout d'abord, démarre un serveur Flower :" +#~ msgid "Training and testing the model" +#~ msgstr "Former et tester le modèle" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. 
This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ "Flower 1.0 : ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" +#~ "Une fois que nous avons trouvé un" +#~ " moyen de créer notre trainloader et" +#~ " notre testloader, nous pouvons nous " +#~ "occuper de l'entraînement et du test." +#~ " C'est très similaire à n'importe " +#~ "quelle boucle d'entraînement ou de test" +#~ " :code:`PyTorch` :" -#~ msgid "`Driver` class provides an interface to the Driver API." -#~ msgstr "" +#~ msgid "Creating the model itself" +#~ msgstr "Créer le modèle lui-même" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" +#~ "Pour créer le modèle lui-même, " +#~ "nous allons simplement charger le modèle" +#~ " distillBERT pré-entraîné en utilisant le" +#~ " :code:`AutoModelForSequenceClassification` de Hugging" +#~ " Face :" -#~ msgid "Disconnect from the SuperLink if connected." -#~ msgstr "" +#~ msgid "Creating the IMDBClient" +#~ msgstr "Création du client IMDBC" #~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" +#~ "Pour fédérer notre exemple à plusieurs" +#~ " clients, nous devons d'abord écrire " +#~ "notre classe de client Flower (héritant" +#~ " de :code:`flwr.client.NumPyClient`). 
C'est très" +#~ " facile, car notre modèle est un " +#~ "modèle :code:`PyTorch` standard :" #~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." #~ msgstr "" +#~ "La fonction :code:`get_parameters` permet au" +#~ " serveur d'obtenir les paramètres du " +#~ "client. Inversement, la fonction " +#~ ":code:`set_parameters` permet au serveur " +#~ "d'envoyer ses paramètres au client. " +#~ "Enfin, la fonction :code:`fit` forme le" +#~ " modèle localement pour le client, et" +#~ " la fonction :code:`evaluate` teste le " +#~ "modèle localement et renvoie les mesures" +#~ " correspondantes." -#~ msgid "start\\_driver" -#~ msgstr "start_client" +#~ msgid "Starting the server" +#~ msgstr "Démarrer le serveur" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. 
Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" #~ msgstr "" +#~ "Maintenant que nous avons un moyen " +#~ "d'instancier les clients, nous devons " +#~ "créer notre serveur afin d'agréger les" +#~ " résultats. Avec Flower, cela peut " +#~ "être fait très facilement en choisissant" +#~ " d'abord une stratégie (ici, nous " +#~ "utilisons :code:`FedAvg`, qui définira les " +#~ "poids globaux comme la moyenne des " +#~ "poids de tous les clients à chaque" +#~ " tour) et en utilisant ensuite la " +#~ "fonction :code:`flwr.server.start_server` :" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." #~ msgstr "" +#~ "La fonction :code:`weighted_average` est là" +#~ " pour fournir un moyen d'agréger les" +#~ " mesures réparties entre les clients " +#~ "(en gros, cela nous permet d'afficher" +#~ " une belle moyenne de précision et" +#~ " de perte pour chaque tour)." -#~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." -#~ msgstr "" +#~ msgid "Putting everything together" +#~ msgstr "Tout assembler" -#~ msgid "The Driver object to use." 
+#~ msgid "We can now start client instances using:" #~ msgstr "" +#~ "Nous pouvons maintenant démarrer des " +#~ "instances de clients en utilisant :" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." #~ msgstr "" +#~ "Et ils pourront se connecter au " +#~ "serveur et démarrer la formation " +#~ "fédérée." -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" +#~ "Si tu veux voir tout ce qui " +#~ "est mis ensemble, tu devrais consulter" +#~ " l'exemple de code complet : " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." #~ msgstr "" +#~ "Bien sûr, c'est un exemple très " +#~ "basique, et beaucoup de choses peuvent" +#~ " être ajoutées ou modifiées, il " +#~ "s'agissait juste de montrer avec quelle" +#~ " simplicité on pouvait fédérer un " +#~ "flux de travail Hugging Face à " +#~ "l'aide de Flower." -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" - -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "Simulation de moniteur" +#~ "Notez que dans cet exemple, nous " +#~ "avons utilisé :code:`PyTorch`, mais nous " +#~ "aurions très bien pu utiliser " +#~ ":code:`TensorFlow`." 
#~ msgid "" #~ "Check out this Federated Learning " #~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." -#~ msgstr "" - -#~ msgid "Quickstart MXNet" -#~ msgstr "Démarrage rapide de MXNet" - -#~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un modèle :code:`Sequential` " -#~ "sur MNIST à l'aide de Flower et" -#~ " de MXNet." - -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant PyTorch Lightning et " +#~ "Flower !" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, lançons une formation " -#~ "distribuée simple avec deux clients et" -#~ " un serveur. 
Notre procédure de " -#~ "formation et l'architecture du réseau " -#~ "sont basées sur le tutoriel de " -#~ "reconnaissance de chiffres écrits à la" -#~ " main du MXNet " -#~ "`_." +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ "Dans un fichier appelé :code:`client.py`, " -#~ "importe Flower et les paquets liés " -#~ "au MXNet :" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en moins de 20 lignes de code" +#~ " !" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" #~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" #~ msgstr "" -#~ "Nous utilisons MXNet pour charger MNIST," -#~ " un ensemble de données de " -#~ "classification d'images populaire de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " -#~ "télécharge les données d'entraînement et " -#~ "de test." +#~ "Comme nous voulons utiliser l'API Keras" +#~ " de TensorFlow (TF), nous devons " +#~ "également installer TF :" -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. 
We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" #~ msgstr "" -#~ "Définis l'entraînement et la perte avec" -#~ " MXNet. Nous entraînons le modèle en" -#~ " parcourant en boucle l'ensemble des " -#~ "données, nous mesurons la perte " -#~ "correspondante et nous l'optimisons." +#~ "Ensuite, dans un fichier appelé " +#~ ":code:`client.py`, importe Flower et " +#~ "TensorFlow :" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." #~ msgstr "" -#~ "Ensuite, nous définissons la validation " -#~ "de notre modèle d'apprentissage automatique." -#~ " Nous effectuons une boucle sur " -#~ "l'ensemble de test et mesurons à " -#~ "la fois la perte et la précision" -#~ " sur l'ensemble de test." +#~ "Nous utilisons les utilitaires Keras de" +#~ " TF pour charger CIFAR10, un ensemble" +#~ " de données de classification d'images " +#~ "colorées populaire pour l'apprentissage " +#~ "automatique. L'appel à " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` télécharge " +#~ "CIFAR10, le met en cache localement, " +#~ "puis renvoie l'ensemble d'entraînement et " +#~ "de test sous forme de NumPy " +#~ "ndarrays." #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." +#~ "Next, we need a model. 
For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" #~ msgstr "" -#~ "Après avoir défini la formation et " -#~ "le test d'un modèle d'apprentissage " -#~ "automatique MXNet, nous utilisons ces " -#~ "fonctions pour mettre en œuvre un " -#~ "client Flower." - -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" +#~ "Ensuite, nous avons besoin d'un modèle." +#~ " Pour les besoins de ce tutoriel, " +#~ "nous utilisons MobilNetV2 avec 10 " +#~ "classes de sortie :" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." #~ msgstr "" -#~ "Après avoir chargé l'ensemble de données" -#~ " avec :code:`load_data()`, nous effectuons " -#~ "une propagation vers l'avant pour " -#~ "initialiser le modèle et les paramètres" -#~ " du modèle avec :code:`model(init)`. " -#~ "Ensuite, nous implémentons un client " -#~ "Flower." +#~ "Le serveur Flower interagit avec les " +#~ "clients par le biais d'une interface " +#~ "appelée :code:`Client`. Lorsque le serveur " +#~ "sélectionne un client particulier pour " +#~ "la formation, il envoie des instructions" +#~ " de formation sur le réseau. 
Le " +#~ "client reçoit ces instructions et " +#~ "appelle l'une des méthodes :code:`Client` " +#~ "pour exécuter ton code (c'est-à-dire " +#~ "pour former le réseau neuronal que " +#~ "nous avons défini plus tôt)." #~ msgid "" #~ "Flower provides a convenience class " #~ "called :code:`NumPyClient` which makes it " #~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" #~ "Flower fournit une classe de commodité" #~ " appelée :code:`NumPyClient` qui facilite " -#~ "l'implémentation de l'interface :code:`Client` " -#~ "lorsque ta charge de travail utilise " -#~ "MXNet. L'implémentation de :code:`NumPyClient` " -#~ "signifie généralement la définition des " -#~ "méthodes suivantes (:code:`set_parameters` est " -#~ "cependant facultatif) :" - -#~ msgid "They can be implemented in the following way:" -#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise Keras. L'interface " +#~ ":code:`NumPyClient` définit trois méthodes qui" +#~ " peuvent être mises en œuvre de " +#~ "la manière suivante :" #~ msgid "" #~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " +#~ " our class :code:`CifarClient` and add " #~ "one line to actually run this " #~ "client:" #~ msgstr "" #~ "Nous pouvons maintenant créer une " -#~ "instance de notre classe :code:`MNISTClient`" +#~ "instance de notre classe :code:`CifarClient`" #~ " et ajouter une ligne pour exécuter" #~ " ce client :" @@ -35296,2381 +37764,4243 @@ msgstr "" #~ "That's it for the client. 
We only" #~ " have to implement :code:`Client` or " #~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" #~ "C'est tout pour le client. Il nous" #~ " suffit d'implémenter :code:`Client` ou " #~ ":code:`NumPyClient` et d'appeler " #~ ":code:`fl.client.start_client()`. La chaîne " -#~ ":code:`\"0.0.0:8080\"` indique au client à " -#~ "quel serveur se connecter. Dans notre" -#~ " cas, nous pouvons exécuter le " +#~ ":code:`\"[: :]:8080\"` indique au client " +#~ "à quel serveur se connecter. Dans " +#~ "notre cas, nous pouvons exécuter le " #~ "serveur et le client sur la même" #~ " machine, c'est pourquoi nous utilisons " -#~ ":code:`\"0.0.0:8080\"`. 
Si nous exécutons une" -#~ " charge de travail véritablement fédérée" -#~ " avec le serveur et les clients " -#~ "s'exécutant sur des machines différentes, " -#~ "tout ce qui doit changer est " -#~ ":code:`server_address` que nous transmettons " -#~ "au client." +#~ ":code:`\"[: :]:8080\"`. Si nous exécutons " +#~ "une charge de travail véritablement " +#~ "fédérée avec le serveur et les " +#~ "clients fonctionnant sur des machines " +#~ "différentes, tout ce qui doit changer" +#~ " est l'adresse :code:`server_address` vers " +#~ "laquelle nous dirigeons le client." + +#~ msgid "Each client will have its own dataset." +#~ msgstr "Chaque client aura son propre ensemble de données." #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" -#~ "Le client et le serveur étant " -#~ "prêts, nous pouvons maintenant tout " -#~ "exécuter et voir l'apprentissage fédéré " -#~ "en action. Les systèmes d'apprentissage " -#~ "fédéré ont généralement un serveur et" -#~ " plusieurs clients. Nous devons donc " -#~ "commencer par démarrer le serveur :" +#~ "Tu devrais maintenant voir comment la" +#~ " formation se déroule dans le tout" +#~ " premier terminal (celui qui a " +#~ "démarré le serveur) :" #~ msgid "" #~ "Congratulations! You've successfully built and" #~ " run your first federated learning " #~ "system. The full `source code " #~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "tensorflow/client.py>`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" #~ "Félicitations ! 
Tu as réussi à " #~ "construire et à faire fonctionner ton" #~ " premier système d'apprentissage fédéré. Le" -#~ " code source complet " +#~ " `code source complet " #~ "`_ de cet exemple se " -#~ "trouve dans :code:`examples/quickstart-mxnet`." +#~ "tensorflow/client.py>`_ pour cela se trouve" +#~ " dans :code:`examples/quickstart-tensorflow/client.py`." -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "Mélange les données et leur étiquette" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ "Nous chargeons l'ensemble de données " -#~ "MNIST de `OpenML `_," -#~ " un ensemble de données de " -#~ "classification d'images populaires de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " -#~ "les données d'entraînement et de test." 
-#~ " L'ensemble d'entraînement est ensuite " -#~ "divisé en 10 partitions avec " -#~ ":code:`utils.partition()`." -#~ msgid "Let's get stated!" -#~ msgstr "Allons-y, déclarons-le !" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "|163117eb654a4273babba413cf8065f5|" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + 
+#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" #~ msgid "" -#~ "Flower provides pre-made docker images" -#~ " on `Docker Hub `_" -#~ " that include all necessary dependencies" -#~ " for running the SuperLink. You can" -#~ " also build your own custom docker" -#~ " images from scratch with a different" -#~ " version of Python or Ubuntu if " -#~ "that is what you need. In this " -#~ "guide, we will explain what images " -#~ "exist and how to build them " -#~ "locally." +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" #~ msgstr "" #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" +#~ msgstr "" + +#~ msgid "" +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." +#~ msgstr "" + +#~ msgid "" +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" +#~ msgstr "" + +#~ msgid "" +#~ "Flower's documentation uses `Sphinx " +#~ "`_. 
There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" +#~ msgstr "" + +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" + +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" +#~ msgstr "" + +#~ msgid "" +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" +#~ msgstr "" + +#~ msgid "" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." +#~ msgstr "" + +#~ msgid "" +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. 
For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgstr "" + +#~ msgid "" +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." +#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." +#~ msgstr "" + +#~ msgid "" +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." #~ msgstr "" #~ msgid "" -#~ "Both, base and SuperLink image are " -#~ "configured via build arguments. Through " -#~ "build arguments, we can make our " -#~ "build more flexible. For example, in " -#~ "the base image, we can specify the" -#~ " version of Python to install using" -#~ " the ``PYTHON_VERSION`` build argument. " -#~ "Some of the build arguments have " -#~ "default values, others must be specified" -#~ " when building the image. All " -#~ "available build arguments for each image" -#~ " are listed in one of the " -#~ "tables below." 
+#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" #~ msgstr "" -#~ msgid "``3.11``" -#~ msgstr "1.0.0rc1" - -#~ msgid "``UBUNTU_VERSION``" +#~ msgid "" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" #~ msgstr "" -#~ msgid "Version of the official Ubuntu Docker image." +#~ msgid "or with ``mamba``::" #~ msgstr "" -#~ msgid "Defaults to ``22.04``." +#~ msgid "" +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." #~ msgstr "" #~ msgid "" -#~ "The following example creates a base " -#~ "image with Python 3.11.0, pip 23.0.1 " -#~ "and setuptools 69.0.2:" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." #~ msgstr "" -#~ msgid "Building the SuperLink image" -#~ msgstr "Démarrer le serveur" - -#~ msgid "Defaults to ``flwr/base``." +#~ msgid "" +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. 
Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." #~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "Évaluer la réponse d'un client." - -#~ msgid "Defaults to ``py3.11``." +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" #~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" #~ msgstr "" -#~ msgid "The PyPI package to install." +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" #~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "Flux de travail" - #~ msgid "" -#~ "The following example creates a " -#~ "SuperLink image with the official Flower" -#~ " base image py3.11-ubuntu22.04 and Flower" -#~ " 1.8.0:" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. 
Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" #~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY``, " -#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " -#~ "arguments." +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." #~ msgstr "" -#~ msgid "Creating New Messages" -#~ msgstr "Création de nouveaux messages" - #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. 
We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" -#~ "Voici un guide simple pour créer " -#~ "un nouveau type de message entre " -#~ "le serveur et les clients dans " -#~ "Flower." #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" -#~ "Supposons que nous ayons les fonctions" -#~ " suivantes dans :code:`server.py` et " -#~ ":code:`numpy_client.py`..." - -#~ msgid "Server's side:" -#~ msgstr "Côté serveur :" - -#~ msgid "Client's side:" -#~ msgstr "Côté client :" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" -#~ "Voyons maintenant ce que nous devons " -#~ "mettre en œuvre pour que cette " -#~ "simple fonction entre le serveur et " -#~ "le client fonctionne !" - -#~ msgid "Message Types for Protocol Buffers" -#~ msgstr "Types de messages pour les tampons de protocole" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. 
For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" -#~ "La première chose à faire est de" -#~ " définir un type de message pour " -#~ "le système RPC dans :code:`transport.proto`." -#~ " Notez que nous devons le faire " -#~ "à la fois pour les messages de " -#~ "demande et de réponse. Pour plus " -#~ "de détails sur la syntaxe de " -#~ "proto3, veuillez consulter la `documentation" -#~ " officielle `_." - -#~ msgid "Within the :code:`ServerMessage` block:" -#~ msgstr "Dans le bloc :code:`ServerMessage` :" - -#~ msgid "Within the ClientMessage block:" -#~ msgstr "Dans le bloc ClientMessage :" #~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." #~ msgstr "" -#~ "Veille à ajouter également un champ " -#~ "du type de message nouvellement créé " -#~ "dans :code:`oneof msg`." - -#~ msgid "Once that is done, we will compile the file with:" -#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" - -#~ msgid "If it compiles successfully, you should see the following message:" -#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" - -#~ msgid "Serialization and Deserialization Functions" -#~ msgstr "Fonctions de sérialisation et de désérialisation" #~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." 
+#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" -#~ "La prochaine étape consiste à ajouter" -#~ " des fonctions pour sérialiser et " -#~ "désérialiser les types de données Python" -#~ " vers ou à partir des types de" -#~ " messages RPC définis. Tu dois " -#~ "ajouter ces fonctions dans :code:`serde.py`." - -#~ msgid "The four functions:" -#~ msgstr "Les quatre fonctions :" - -#~ msgid "Sending the Message from the Server" -#~ msgstr "Envoi du message à partir du serveur" #~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" -#~ "Écris maintenant la fonction de demande" -#~ " dans ta classe Client Proxy (par " -#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" -#~ " les fonctions serde que tu viens " -#~ "de créer :" - -#~ msgid "Receiving the Message by the Client" -#~ msgstr "Réception du message par le client" #~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." #~ msgstr "" -#~ "Dernière étape ! Modifie le code " -#~ "dans :code:`message_handler.py` pour vérifier " -#~ "le champ de ton message et appeler" -#~ " la fonction :code:`example_response`. N'oublie" -#~ " pas d'utiliser les fonctions serde !" 
- -#~ msgid "Within the handle function:" -#~ msgstr "Dans le cadre de la fonction de poignée :" -#~ msgid "And add a new function:" -#~ msgstr "Et ajoute une nouvelle fonction :" +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." +#~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" #~ msgstr "" -#~ "Avec un peu de chance, lorsque tu" -#~ " exécuteras ton programme, tu obtiendras" -#~ " le résultat escompté !" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__." +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. 
In the example below, we " -#~ "tell Docker via the flag ``--volume``" -#~ " to mount the user's home directory" -#~ " (``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." #~ msgstr "" #~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the user's home directory on your " -#~ "host system. If the file already " -#~ "exists, the SuperLink tries to restore" -#~ " the state from the file. To " -#~ "start the SuperLink with an empty " -#~ "database, simply remove the ``state.db`` " -#~ "file." +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. 
Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." 
+#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`__." +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." #~ msgstr "" #~ msgid "" -#~ "Here's another example to start with " -#~ "HTTPS. Use the ``--certificates`` command " -#~ "line argument to pass paths to (CA" -#~ " certificate, server certificate, and " -#~ "server private key)." +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." #~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" -#~ msgid "Run Flower server (Driver API)." -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." 
+#~ msgstr "" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" -#~ msgid "Run Flower server (Fleet API)." -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." +#~ msgstr "" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." #~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" #~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." 
#~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" #~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" #~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." #~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." 
#~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" #~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" #~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" #~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" #~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ msgid "Edge Client Engine" -#~ msgstr "Moteur client Edge" - -#~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with Edge Client Engine" +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" #~ msgstr "" -#~ "`Flower `_ architecture de " -#~ "base avec Edge Client Engine" - -#~ msgid "Virtual Client Engine" -#~ msgstr "Moteur de client virtuel" -#~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with Virtual Client Engine" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ "`Flower `_ architecture de " -#~ "base avec moteur de client virtuel" -#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" #~ msgstr "" -#~ "Moteur client virtuel et moteur client" -#~ " Edge dans la même charge de " -#~ "travail" -#~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with both Virtual Client " -#~ "Engine and Edge Client Engine" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ "`Flower `_ architecture de " -#~ "base avec un moteur de client " -#~ "virtuel et un moteur de client " -#~ "périphérique" -#~ msgid "How to build Docker Flower images locally" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" #~ msgstr "" -#~ msgid "Clone the flower repository." 
-#~ msgstr "**Fourche le dépôt de Flower**" - -#~ msgid "" -#~ "Please follow the first section on " -#~ ":doc:`Run Flower using Docker ` which " -#~ "covers this step in more detail." +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "``22.04``" -#~ msgstr "1.0.0rc1" - -#~ msgid "``23.0.1``" -#~ msgstr "1.0.0rc1" - -#~ msgid "``69.0.2``" -#~ msgstr "``1.0.0b0``" - -#~ msgid "``1.8.0``" -#~ msgstr "``1.0.0b0``" - -#~ msgid "" -#~ "The following example creates a base " -#~ "Ubuntu/Alpine image with Python 3.11.0, " -#~ "pip 23.0.1, setuptools 69.0.2 and Flower" -#~ " 1.8.0:" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ msgid "" -#~ "The name of image is ``flwr_base`` " -#~ "and the tag ``0.1.0``. Remember that " -#~ "the build arguments as well as the" -#~ " name and tag can be adapted to" -#~ " your needs. These values serve as" -#~ " examples only." +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" #~ msgstr "" -#~ msgid "Building the SuperLink/SuperNode or ServerApp image" -#~ msgstr "Démarrer le serveur" - -#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" #~ msgstr "" -#~ msgid "" -#~ "The following example creates a " -#~ "SuperLink/SuperNode or ServerApp image with" -#~ " the official Flower base image:" +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" #~ msgstr "" -#~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY`` " -#~ "build argument." +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "Trigger the CI for building the Docker images." -#~ msgstr "Démarrer le serveur" - -#~ msgid "" -#~ "To trigger the workflow, a collaborator" -#~ " must create a ``workflow_dispatch`` event" -#~ " in the GitHub CI. This can be" -#~ " done either through the UI or " -#~ "via the GitHub CLI. 
The event " -#~ "requires only one input, the Flower " -#~ "version, to be released." +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ msgid "**Via the UI**" -#~ msgstr "**Review the PR**" - -#~ msgid "" -#~ "Go to the ``Build docker images`` " -#~ "workflow `page " -#~ "`_." +#~ msgid "|d62da263071d45a496f543e41fce3a19|" #~ msgstr "" -#~ msgid "" -#~ "Click on the ``Run workflow`` button " -#~ "and type the new version of Flower" -#~ " in the ``Version of Flower`` input" -#~ " field." +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" #~ msgstr "" -#~ msgid "**Via the GitHub CI**" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" #~ msgstr "" -#~ msgid "" -#~ "Make sure you are logged in via" -#~ " ``gh auth login`` and that the " -#~ "current working directory is the root" -#~ " of the Flower repository." +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" #~ msgstr "" -#~ msgid "" -#~ "Trigger the workflow via ``gh workflow" -#~ " run docker-images.yml -f flwr-" -#~ "version=``." +#~ msgid "|e6ca84e1df244f238288a768352678e5|" #~ msgstr "" -#~ msgid "Preliminarities" +#~ msgid "|39c2422082554a21963baffb33a0d057|" #~ msgstr "" -#~ msgid "Example: JAX - Run JAX Federated" -#~ msgstr "Exemple : JAX - Exécuter JAX Federated" - -#~ msgid "" -#~ "\\small\n" -#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" #~ msgstr "" -#~ msgid ":doc:`How to run Flower using Docker `" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" #~ msgstr "" -#~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__. Supported " -#~ "architectures include ``amd64`` and " -#~ "``arm64v8``." 
+#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" #~ msgstr "" -#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" #~ msgstr "" -#~ msgid "" -#~ "If you do not see the version " -#~ "of Docker but instead get an error" -#~ " saying that the command was not " -#~ "found, you will need to install " -#~ "Docker first. You can find installation" -#~ " instruction `here `_." +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" #~ msgstr "" -#~ msgid "" -#~ "On Linux, Docker commands require " -#~ "``sudo`` privilege. If you want to " -#~ "avoid using ``sudo``, you can follow " -#~ "the `Post-installation steps " -#~ "`_" -#~ " on the official Docker website." +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" #~ msgstr "" -#~ msgid "" -#~ "To ensure optimal performance and " -#~ "compatibility, the SuperLink, SuperNode and" -#~ " ServerApp image must have the same" -#~ " version when running together. This " -#~ "guarantees seamless integration and avoids " -#~ "potential conflicts or issues that may" -#~ " arise from using different versions." +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ msgid "Flower SuperLink" -#~ msgstr "flower-superlink" - -#~ msgid "Quickstart" -#~ msgstr "Démarrage rapide de JAX" - -#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" #~ msgstr "" -#~ msgid "" -#~ "The command pulls the Docker image " -#~ "with the tag ``1.8.0`` from Docker " -#~ "Hub. The tag specifies the Flower " -#~ "version. In this case, Flower 1.8.0. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ msgid "" -#~ "By default, the Flower SuperLink keeps" -#~ " state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. 
" -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" #~ msgstr "" -#~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower SuperLink. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" #~ msgstr "" -#~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `__ when " -#~ "deploying to a production environment." +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" #~ msgstr "" -#~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the SuperLink " -#~ "supports:" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" #~ msgstr "" -#~ msgid "Mounting a volume to store the state on the host system" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" #~ msgstr "" -#~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a directory where you want " -#~ "to save the file on your host " -#~ "system and a name for the database" -#~ " file. By default, the SuperLink " -#~ "container runs with a non-root " -#~ "user called ``app`` with the user " -#~ "ID ``49999``. It is recommended to " -#~ "create new directory and change the " -#~ "user ID of the directory to " -#~ "``49999`` to ensure the mounted " -#~ "directory has the proper permissions. 
If" -#~ " you later want to delete the " -#~ "directory, you can change the user " -#~ "ID back to the current user ID " -#~ "by running ``sudo chown -R $USER:$(id" -#~ " -gn) state``." +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" #~ msgstr "" -#~ msgid "" -#~ "In the example below, we create a" -#~ " new directory, change the user ID" -#~ " and tell Docker via the flag " -#~ "``--volume`` to mount the local " -#~ "``state`` directory into the ``/app/state``" -#~ " directory of the container. Furthermore," -#~ " we use the flag ``--database`` to" -#~ " specify the name of the database " -#~ "file." +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the ``state`` directory on your host " -#~ "system. If the file already exists, " -#~ "the SuperLink tries to restore the " -#~ "state from the file. To start the" -#~ " SuperLink with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" #~ msgstr "" -#~ msgid "" -#~ "To enable SSL, you will need a " -#~ "PEM-encoded root certificate, a PEM-" -#~ "encoded private key and a PEM-" -#~ "encoded certificate chain." +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/certificates/`` directory of the " -#~ "container. This allows the SuperLink to" -#~ " access the files within the " -#~ "container. The ``ro`` stands for " -#~ "``read-only``. Docker volumes default to" -#~ " ``read-write``; that option tells " -#~ "Docker to make the volume ``read-" -#~ "only`` instead. Finally, we pass the " -#~ "names of the certificates and key " -#~ "file to the SuperLink with the " -#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " -#~ "and ``--ssl-keyfile`` flag." 
+#~ "Include SecAgg, SecAgg+, and LightSecAgg " +#~ "protocol. The LightSecAgg protocol has " +#~ "not been implemented yet, so its " +#~ "diagram and abstraction may not be " +#~ "accurate in practice. The SecAgg " +#~ "protocol can be considered as a " +#~ "special case of the SecAgg+ protocol." #~ msgstr "" +#~ "Inclut les protocoles SecAgg, SecAgg+ et" +#~ " LightSecAgg. Le protocole LightSecAgg n'a" +#~ " pas encore été mis en œuvre, " +#~ "de sorte que son diagramme et son" +#~ " abstraction peuvent ne pas être " +#~ "exacts dans la pratique. Le protocole" +#~ " SecAgg peut être considéré comme un" +#~ " cas particulier du protocole SecAgg+." -#~ msgid "" -#~ "Because Flower containers, by default, " -#~ "run with a non-root user ``app``," -#~ " the mounted files and directories " -#~ "must have the proper permissions for " -#~ "the user ID ``49999``. For example, " -#~ "to change the user ID of all " -#~ "files in the ``certificates/`` directory, " -#~ "you can run ``sudo chown -R " -#~ "49999:49999 certificates/*``." -#~ msgstr "" +#~ msgid "The ``SecAgg+`` abstraction" +#~ msgstr "L'abstraction :code:`SecAgg+`" #~ msgid "" -#~ "The SuperNode Docker image comes with" -#~ " a pre-installed version of Flower" -#~ " and serves as a base for " -#~ "building your own SuperNode image." +#~ "In this implementation, each client will" +#~ " be assigned with a unique index " +#~ "(int) for secure aggregation, and thus" +#~ " many python dictionaries used have " +#~ "keys of int type rather than " +#~ "ClientProxy type." #~ msgstr "" +#~ "Dans cette implémentation, chaque client " +#~ "se verra attribuer un index unique " +#~ "(int) pour une agrégation sécurisée, et" +#~ " donc de nombreux dictionnaires python " +#~ "utilisés ont des clés de type int" +#~ " plutôt que de type ClientProxy." #~ msgid "" -#~ "The SuperNode Docker image currently " -#~ "works only with the 1.9.0-nightly " -#~ "release. 
A stable version will be " -#~ "available when Flower 1.9.0 (stable) " -#~ "gets released (ETA: May). A SuperNode" -#~ " nightly image must be paired with" -#~ " the corresponding SuperLink and ServerApp" -#~ " nightly images released on the same" -#~ " day. To ensure the versions are " -#~ "in sync, using the concrete tag, " -#~ "e.g., ``1.9.0.dev20240501`` instead of " -#~ "``nightly`` is recommended." +#~ "The Flower server will execute and " +#~ "process received results in the " +#~ "following order:" #~ msgstr "" +#~ "Le serveur Flower exécutera et traitera" +#~ " les résultats reçus dans l'ordre " +#~ "suivant :" -#~ msgid "" -#~ "We will use the ``quickstart-pytorch``" -#~ " example, which you can find in " -#~ "the Flower repository, to illustrate how" -#~ " you can dockerize your ClientApp." -#~ msgstr "" +#~ msgid "The ``LightSecAgg`` abstraction" +#~ msgstr "L'abstraction :code:`LightSecAgg`" + +#~ msgid "Types" +#~ msgstr "Types" #~ msgid "" -#~ "Before we can start, we need to" -#~ " meet a few prerequisites in our " -#~ "local development environment. You can " -#~ "skip the first part if you want" -#~ " to run your ClientApp instead of " -#~ "the ``quickstart-pytorch`` example." +#~ "Docker Compose is `installed " +#~ "`_." #~ msgstr "" -#~ msgid "Creating a SuperNode Dockerfile" -#~ msgstr "" +#~ msgid "Run the example:" +#~ msgstr "Fédérer l'exemple" -#~ msgid "Let's assume the following project layout:" +#~ msgid "Follow the logs of the SuperExec service:" #~ msgstr "" -#~ msgid "" -#~ "First, we need to create a " -#~ "``requirements.txt`` file in the directory " -#~ "where the ``ClientApp`` code is located." -#~ " In the file, we list all the" -#~ " dependencies that the ClientApp requires." +#~ msgid "Only runs on AMD64." 
#~ msgstr "" #~ msgid "" -#~ "Note that `flwr `__" -#~ " is already installed in the " -#~ "``flwr/supernode`` base image, so you " -#~ "only need to include other package " -#~ "dependencies in your ``requirements.txt``, " -#~ "such as ``torch``, ``tensorflow``, etc." +#~ "Use the method that works best for" +#~ " you to copy the ``server`` " +#~ "directory, the certificates, and your " +#~ "Flower project to the remote machine." #~ msgstr "" #~ msgid "" -#~ "Next, we create a Dockerfile. If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.supernode`` in ``examples/quickstart-" -#~ "pytorch``." +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``server`` Docker Compose files." #~ msgstr "" #~ msgid "" -#~ "The ``Dockerfile.supernode`` contains the " -#~ "instructions that assemble the SuperNode " -#~ "image." +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``client`` Docker Compose files." #~ msgstr "" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the SuperNode " -#~ "image tagged ``nightly`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. Next, we install the" -#~ " ClientApp dependencies by copying the " -#~ "``requirements.txt`` file into the image " -#~ "and run ``pip install``. In the " -#~ "last two lines, we copy the " -#~ "``client.py`` module into the image and" -#~ " set the entry point to ``flower-" -#~ "client-app`` with the argument " -#~ "``client:app``. The argument is the " -#~ "object reference of the ClientApp " -#~ "(``:``) that will be run" -#~ " inside the ClientApp." +#~ "The Path of the ``root-certificates``" +#~ " should be relative to the location" +#~ " of the ``pyproject.toml`` file." 
#~ msgstr "" -#~ msgid "Building the SuperNode Docker image" -#~ msgstr "Démarrer le serveur" - -#~ msgid "" -#~ "Next, we build the SuperNode Docker " -#~ "image by running the following command" -#~ " in the directory where Dockerfile " -#~ "and ClientApp code are located." +#~ msgid "To run the project, execute:" #~ msgstr "" -#~ msgid "" -#~ "We gave the image the name " -#~ "``flwr_supernode``, and the tag ``0.0.1``. " -#~ "Remember that the here chosen values " -#~ "only serve as an example. You can" -#~ " change them to your needs." +#~ msgid "Run the ``quickstart-docker`` project by executing the command:" #~ msgstr "" -#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgid "Follow the SuperExec logs to track the execution of the run:" #~ msgstr "" -#~ msgid "Let's break down each part of this command:" +#~ msgid "Execute the command to run the quickstart example:" #~ msgstr "" -#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgid "Monitor the SuperExec logs and wait for the summary to appear:" #~ msgstr "" -#~ msgid "" -#~ "``--rm``: This option specifies that the" -#~ " container should be automatically removed" -#~ " when it stops." -#~ msgstr "" +#~ msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#~ msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" -#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -#~ msgstr "" +#~ msgid "Centralized Training" +#~ msgstr "Formation centralisée" -#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgid "" +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. 
The only thing" +#~ " to do is modifying the file " +#~ "called ``cifar.py``, revised part is " +#~ "shown below:" #~ msgstr "" +#~ "Tous les fichiers sont révisés sur " +#~ "la base de `Exemple : PyTorch -" +#~ " From Centralized To Federated " +#~ "`_. La seule " +#~ "chose à faire est de modifier le" +#~ " fichier appelé :code:`cifar.py`, la partie" +#~ " révisée est montrée ci-dessous :" #~ msgid "" -#~ "``--superlink 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ "The model architecture defined in class" +#~ " Net() is added with Batch " +#~ "Normalization layers accordingly." #~ msgstr "" +#~ "L'architecture du modèle définie dans la" +#~ " classe Net() est ajoutée avec les" +#~ " couches de normalisation par lots en" +#~ " conséquence." -#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgid "You can now run your machine learning workload:" #~ msgstr "" +#~ "Tu peux maintenant exécuter ta charge" +#~ " de travail d'apprentissage automatique :" #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your SuperNodes." +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the system consists of one " +#~ "server and two clients." +#~ msgstr "" +#~ "Jusqu'à présent, tout ceci devrait te" +#~ " sembler assez familier si tu as " +#~ "déjà utilisé PyTorch. Passons à l'étape" +#~ " suivante et utilisons ce que nous" +#~ " avons construit pour créer un " +#~ "système d'apprentissage fédéré au sein " +#~ "de FedBN, le système se compose " +#~ "d'un serveur et de deux clients." 
+ +#~ msgid "Federated Training" +#~ msgstr "Formation fédérée" + +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only ``get_parameters`` " +#~ "and ``set_parameters`` function in " +#~ "``client.py`` needed to revise. If not," +#~ " please read the :doc:`Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`. first." +#~ msgstr "" +#~ "Si vous avez lu `Exemple : PyTorch" +#~ " - From Centralized To Federated " +#~ "`_, les parties " +#~ "suivantes sont faciles à suivre, seules" +#~ " les fonctions :code:`get_parameters` et " +#~ ":code:`set_parameters` dans :code:`client.py` ont" +#~ " besoin d'être révisées. Si ce n'est" +#~ " pas le cas, veuillez lire `Exemple" +#~ " : PyTorch - From Centralized To " +#~ "Federated `. d'abord." + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. In FedBN, " +#~ "``server.py`` keeps unchanged, we can " +#~ "start the server directly." +#~ msgstr "" +#~ "Notre exemple consiste en un *serveur*" +#~ " et deux *clients*. Dans FedBN, " +#~ ":code:`server.py` reste inchangé, nous pouvons" +#~ " démarrer le serveur directement." + +#~ msgid "Now, you can now open two additional terminal windows and run" +#~ msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning with FedBN strategy across two" +#~ " clients. Congratulations!" #~ msgstr "" +#~ "dans chaque fenêtre (assure-toi que " +#~ "le serveur est toujours en cours " +#~ "d'exécution avant de le faire) et " +#~ "tu verras ton projet PyTorch (auparavant" +#~ " centralisé) exécuter l'apprentissage fédéré " +#~ "avec la stratégie FedBN sur deux " +#~ "clients. Félicitations !" 
+ +#~ msgid "Example: PyTorch - From Centralized To Federated" +#~ msgstr "Exemple : PyTorch - De la centralisation à la fédération" #~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "SuperNode binary. To see all available" -#~ " flags that the SuperNode supports, " -#~ "run:" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload. We are using PyTorch to" +#~ " train a Convolutional Neural Network " +#~ "on the CIFAR-10 dataset. First, we " +#~ "introduce this machine learning task " +#~ "with a centralized training approach " +#~ "based on the `Deep Learning with " +#~ "PyTorch " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" +#~ "Ce tutoriel te montrera comment utiliser" +#~ " Flower pour construire une version " +#~ "fédérée d'une charge de travail " +#~ "d'apprentissage automatique existante. Nous " +#~ "utilisons PyTorch pour entraîner un " +#~ "réseau neuronal convolutif sur l'ensemble " +#~ "de données CIFAR-10. Tout d'abord, nous" +#~ " présentons cette tâche d'apprentissage " +#~ "automatique avec une approche d'entraînement" +#~ " centralisée basée sur le tutoriel " +#~ "`Deep Learning with PyTorch " +#~ "`_." +#~ " Ensuite, nous nous appuyons sur le" +#~ " code d'entraînement centralisé pour " +#~ "exécuter l'entraînement de manière fédérée." #~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your SuperNode container." +#~ "We begin with a brief description " +#~ "of the centralized CNN training code." +#~ " If you want a more in-depth" +#~ " explanation of what's going on then" +#~ " have a look at the official " +#~ "`PyTorch tutorial " +#~ "`_." #~ msgstr "" +#~ "Nous commençons par une brève " +#~ "description du code d'entraînement CNN " +#~ "centralisé. 
Si tu veux une explication" +#~ " plus approfondie de ce qui se " +#~ "passe, jette un coup d'œil au " +#~ "tutoriel officiel `PyTorch " +#~ "`_." #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--root-certificates`` " -#~ "flag when starting the container." +#~ "Let's create a new file called " +#~ "``cifar.py`` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as ``torch`` and" +#~ " ``torchvision``) need to be imported. " +#~ "You can see that we do not " +#~ "import any package for federated " +#~ "learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." +#~ msgstr "" +#~ "Créons un nouveau fichier appelé " +#~ ":code:`cifar.py` avec tous les composants " +#~ "requis pour une formation traditionnelle " +#~ "(centralisée) sur le CIFAR-10. Tout " +#~ "d'abord, tous les paquets requis (tels" +#~ " que :code:`torch` et :code:`torchvision`) " +#~ "doivent être importés. Tu peux voir " +#~ "que nous n'importons aucun paquet pour" +#~ " l'apprentissage fédéré. Tu peux conserver" +#~ " toutes ces importations telles quelles " +#~ "même lorsque nous ajouterons les " +#~ "composants d'apprentissage fédéré à un " +#~ "moment ultérieur." + +#~ msgid "" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in ``class Net()``." #~ msgstr "" +#~ "Comme nous l'avons déjà mentionné, nous" +#~ " utiliserons l'ensemble de données CIFAR-10" +#~ " pour cette charge de travail " +#~ "d'apprentissage automatique. 
L'architecture du " +#~ "modèle (un réseau neuronal convolutif " +#~ "très simple) est définie dans " +#~ ":code:`class Net()`." #~ msgid "" -#~ "The procedure for building and running" -#~ " a ServerApp image is almost " -#~ "identical to the SuperNode image." +#~ "The ``load_data()`` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ "``transform`` normalized the data after " +#~ "loading." #~ msgstr "" +#~ "La fonction :code:`load_data()` charge les " +#~ "ensembles d'entraînement et de test " +#~ "CIFAR-10. La fonction :code:`transform` " +#~ "normalise les données après leur " +#~ "chargement." #~ msgid "" -#~ "Similar to the SuperNode image, the " -#~ "ServerApp Docker image comes with a " -#~ "pre-installed version of Flower and " -#~ "serves as a base for building your" -#~ " own ServerApp image." +#~ "We now need to define the training" +#~ " (function ``train()``) which loops over" +#~ " the training set, measures the loss," +#~ " backpropagates it, and then takes " +#~ "one optimizer step for each batch " +#~ "of training examples." #~ msgstr "" +#~ "Nous devons maintenant définir la " +#~ "formation (fonction :code:`train()`) qui passe" +#~ " en boucle sur l'ensemble de la " +#~ "formation, mesure la perte, la " +#~ "rétropropage, puis effectue une étape " +#~ "d'optimisation pour chaque lot d'exemples " +#~ "de formation." #~ msgid "" -#~ "We will use the same ``quickstart-" -#~ "pytorch`` example as we do in the" -#~ " Flower SuperNode section. If you " -#~ "have not already done so, please " -#~ "follow the `SuperNode Prerequisites`_ before" -#~ " proceeding." +#~ "The evaluation of the model is " +#~ "defined in the function ``test()``. The" +#~ " function loops over all test samples" +#~ " and measures the loss of the " +#~ "model based on the test dataset." #~ msgstr "" +#~ "L'évaluation du modèle est définie dans" +#~ " la fonction :code:`test()`. 
La fonction" +#~ " boucle sur tous les échantillons de" +#~ " test et mesure la perte du " +#~ "modèle en fonction de l'ensemble des " +#~ "données de test." -#~ msgid "Creating a ServerApp Dockerfile" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our CNN on CIFAR-10." #~ msgstr "" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, la " +#~ "formation et l'évaluation, nous pouvons " +#~ "tout mettre ensemble et former notre " +#~ "CNN sur CIFAR-10." #~ msgid "" -#~ "First, we need to create a " -#~ "Dockerfile in the directory where the" -#~ " ``ServerApp`` code is located. If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" -#~ "pytorch``." +#~ "So far, this should all look " +#~ "fairly familiar if you've used PyTorch" +#~ " before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." #~ msgstr "" +#~ "Jusqu'à présent, tout cela devrait te" +#~ " sembler assez familier si tu as " +#~ "déjà utilisé PyTorch. Passons à l'étape" +#~ " suivante et utilisons ce que nous" +#~ " avons construit pour créer un simple" +#~ " système d'apprentissage fédéré composé " +#~ "d'un serveur et de deux clients." + +#~ msgid "" +#~ "The simple machine learning project " +#~ "discussed in the previous section trains" +#~ " the model on a single dataset " +#~ "(CIFAR-10), we call this centralized " +#~ "learning. This concept of centralized " +#~ "learning, as shown in the previous " +#~ "section, is probably known to most " +#~ "of you, and many of you have " +#~ "used it previously. 
Normally, if you'd" +#~ " want to run machine learning " +#~ "workloads in a federated fashion, then" +#~ " you'd have to change most of " +#~ "your code and set everything up " +#~ "from scratch. This can be a " +#~ "considerable effort." +#~ msgstr "" +#~ "Le projet simple d'apprentissage automatique" +#~ " discuté dans la section précédente " +#~ "entraîne le modèle sur un seul " +#~ "ensemble de données (CIFAR-10), nous " +#~ "appelons cela l'apprentissage centralisé. Ce" +#~ " concept d'apprentissage centralisé, comme " +#~ "le montre la section précédente, est " +#~ "probablement connu de la plupart d'entre" +#~ " vous, et beaucoup d'entre vous l'ont" +#~ " déjà utilisé. Normalement, si tu " +#~ "veux exécuter des charges de travail " +#~ "d'apprentissage automatique de manière " +#~ "fédérée, tu dois alors changer la " +#~ "plupart de ton code et tout mettre" +#~ " en place à partir de zéro, ce" +#~ " qui peut représenter un effort " +#~ "considérable." + +#~ msgid "" +#~ "However, with Flower you can evolve " +#~ "your pre-existing code into a " +#~ "federated learning setup without the " +#~ "need for a major rewrite." +#~ msgstr "" +#~ "Cependant, avec Flower, tu peux faire" +#~ " évoluer ton code préexistant vers " +#~ "une configuration d'apprentissage fédéré sans" +#~ " avoir besoin d'une réécriture majeure." + +#~ msgid "" +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in ``cifar.py``" +#~ " for the *clients* that are connected" +#~ " to the *server*. The *server* sends" +#~ " model parameters to the clients. The" +#~ " *clients* run the training and " +#~ "update the parameters. The updated " +#~ "parameters are sent back to the " +#~ "*server* which averages all received " +#~ "parameter updates. This describes one " +#~ "round of the federated learning process" +#~ " and we repeat this for multiple " +#~ "rounds." +#~ msgstr "" +#~ "Le concept est facile à comprendre. 
" +#~ "Nous devons démarrer un *serveur* et " +#~ "utiliser le code dans :code:`cifar.py` " +#~ "pour les *clients* qui sont connectés" +#~ " au *serveur*. Le *serveur* envoie " +#~ "les paramètres du modèle aux clients." +#~ " Les *clients* exécutent la formation " +#~ "et mettent à jour les paramètres. " +#~ "Les paramètres mis à jour sont " +#~ "renvoyés au *serveur* qui fait la " +#~ "moyenne de toutes les mises à jour" +#~ " de paramètres reçues. Ceci décrit un" +#~ " tour du processus d'apprentissage fédéré" +#~ " et nous répétons cette opération " +#~ "pour plusieurs tours." + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ "``server.py`` first. The *server* needs " +#~ "to import the Flower package ``flwr``." +#~ " Next, we use the ``start_server`` " +#~ "function to start a server and " +#~ "tell it to perform three rounds of" +#~ " federated learning." +#~ msgstr "" +#~ "Notre exemple consiste en un *serveur*" +#~ " et deux *clients*. Commençons par " +#~ "configurer :code:`server.py`. Le *serveur* " +#~ "doit importer le paquet Flower " +#~ ":code:`flwr`. Ensuite, nous utilisons la " +#~ "fonction :code:`start_server` pour démarrer un" +#~ " serveur et lui demander d'effectuer " +#~ "trois cycles d'apprentissage fédéré." + +#~ msgid "We can already start the *server*:" +#~ msgstr "Nous pouvons déjà démarrer le *serveur* :" #~ msgid "" -#~ "The ``Dockerfile.serverapp`` contains the " -#~ "instructions that assemble the ServerApp " -#~ "image." -#~ msgstr "" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined centralized " +#~ "training in ``cifar.py``. 
Our *client* " +#~ "needs to import ``flwr``, but also " +#~ "``torch`` to update the parameters on" +#~ " our PyTorch model:" +#~ msgstr "" +#~ "Enfin, nous allons définir notre logique" +#~ " *client* dans :code:`client.py` et nous" +#~ " appuyer sur la formation centralisée " +#~ "définie précédemment dans :code:`cifar.py`. " +#~ "Notre *client* doit importer :code:`flwr`, " +#~ "mais aussi :code:`torch` pour mettre à" +#~ " jour les paramètres de notre modèle" +#~ " PyTorch :" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the ServerApp " -#~ "image tagged ``1.8.0`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. In the last two " -#~ "lines, we copy the ``server.py`` module" -#~ " into the image and set the " -#~ "entry point to ``flower-server-app`` " -#~ "with the argument ``server:app``. The " -#~ "argument is the object reference of " -#~ "the ServerApp (``:``) that " -#~ "will be run inside the ServerApp " -#~ "container." +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``CifarClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. 
``CifarClient``" +#~ " needs to implement four methods, two" +#~ " methods for getting/setting model " +#~ "parameters, one method for training the" +#~ " model, and one method for testing" +#~ " the model:" #~ msgstr "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`CifarClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. " +#~ ":code:`CifarClient` needs to implement four" +#~ " methods, two methods for getting/setting" +#~ " model parameters, one method for " +#~ "training the model, and one method " +#~ "for testing the model:" -#~ msgid "Building the ServerApp Docker image" -#~ msgstr "Démarrer le serveur" +#~ msgid "``set_parameters``" +#~ msgstr ":code:`set_parameters`" #~ msgid "" -#~ "Next, we build the ServerApp Docker " -#~ "image by running the following command" -#~ " in the directory where Dockerfile " -#~ "and ServerApp code are located." -#~ msgstr "" +#~ "set the model parameters on the " +#~ "local model that are received from " +#~ "the server" +#~ msgstr "règle les paramètres du modèle local reçus du serveur" #~ msgid "" -#~ "We gave the image the name " -#~ "``flwr_serverapp``, and the tag ``0.0.1``. " -#~ "Remember that the here chosen values " -#~ "only serve as an example. You can" -#~ " change them to your needs." 
+#~ "loop over the list of model " +#~ "parameters received as NumPy ``ndarray``'s " +#~ "(think list of neural network layers)" #~ msgstr "" +#~ "boucle sur la liste des paramètres " +#~ "du modèle reçus sous forme de " +#~ "NumPy :code:`ndarray`'s (pensez à la " +#~ "liste des couches du réseau neuronal)" -#~ msgid "Running the ServerApp Docker image" -#~ msgstr "Démarrer le serveur" +#~ msgid "``get_parameters``" +#~ msgstr ":code:`get_parameters`" -#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgid "" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ "``ndarray``'s (which is what " +#~ "``flwr.client.NumPyClient`` expects)" #~ msgstr "" +#~ "récupère les paramètres du modèle et " +#~ "les renvoie sous forme de liste de" +#~ " :code:`ndarray` NumPy (ce qui correspond" +#~ " à ce que :code:`flwr.client.NumPyClient` " +#~ "attend)" -#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgid "``fit``" #~ msgstr "" #~ msgid "" -#~ "``--superlink 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ "update the parameters of the local " +#~ "model with the parameters received from" +#~ " the server" #~ msgstr "" +#~ "mettre à jour les paramètres du " +#~ "modèle local avec les paramètres reçus" +#~ " du serveur" + +#~ msgid "train the model on the local training set" +#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#~ msgid "get the updated local model weights and return them to the server" +#~ msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" + +#~ msgid "evaluate the updated model on the local test set" +#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" + +#~ msgid "return the local loss and accuracy to the server" +#~ msgstr "renvoie la perte locale et la précision au serveur" #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a 
`bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your ServerApps." +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``test()`` " +#~ "previously defined in ``cifar.py``. So " +#~ "what we really do here is we " +#~ "tell Flower through our ``NumPyClient`` " +#~ "subclass which of our already defined" +#~ " functions to call for training and" +#~ " evaluation. We included type annotations" +#~ " to give you a better understanding" +#~ " of the data types that get " +#~ "passed around." #~ msgstr "" +#~ "Les deux méthodes :code:`NumPyClient` " +#~ ":code:`fit` et :code:`evaluate` utilisent les" +#~ " fonctions :code:`train()` et :code:`test()` " +#~ "définies précédemment dans :code:`cifar.py`. " +#~ "Ce que nous faisons vraiment ici, " +#~ "c'est que nous indiquons à Flower, " +#~ "par le biais de notre sous-classe" +#~ " :code:`NumPyClient`, laquelle de nos " +#~ "fonctions déjà définies doit être " +#~ "appelée pour l'entraînement et l'évaluation." +#~ " Nous avons inclus des annotations de" +#~ " type pour te donner une meilleure" +#~ " compréhension des types de données " +#~ "qui sont transmis." #~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "ServerApp binary. To see all available" -#~ " flags that the ServerApp supports, " -#~ "run:" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ "``CifarClient``, and starts this client. " +#~ "You load your data and model by" +#~ " using ``cifar.py``. Start ``CifarClient`` " +#~ "with the function ``fl.client.start_client()`` " +#~ "by pointing it at the same IP " +#~ "address we used in ``server.py``:" #~ msgstr "" +#~ "Il ne reste plus qu'à définir une" +#~ " fonction qui charge le modèle et " +#~ "les données, crée un :code:`CifarClient` " +#~ "et démarre ce client. 
Tu charges " +#~ "tes données et ton modèle en " +#~ "utilisant :code:`cifar.py`. Démarre " +#~ ":code:`CifarClient` avec la fonction " +#~ ":code:`fl.client.start_client()` en la faisant " +#~ "pointer sur la même adresse IP que" +#~ " celle que nous avons utilisée dans" +#~ " :code:`server.py` :" -#~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your ServerApp container." +#~ msgid "And that's it. You can now open two additional terminal windows and run" #~ msgstr "" +#~ "Tu peux maintenant ouvrir deux autres" +#~ " fenêtres de terminal et exécuter les" +#~ " commandes suivantes" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--root-certificates`` " -#~ "flags when starting the container." +#~ "in each window (make sure that the" +#~ " server is running before you do " +#~ "so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning across two clients. Congratulations!" #~ msgstr "" +#~ "dans chaque fenêtre (assure-toi que " +#~ "le serveur fonctionne avant de le " +#~ "faire) et tu verras ton projet " +#~ "PyTorch (auparavant centralisé) exécuter " +#~ "l'apprentissage fédéré sur deux clients. " +#~ "Félicitations !" -#~ msgid "Run with root user privileges" +#~ msgid "" +#~ "The full source code for this " +#~ "example: `PyTorch: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is, of course, somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using different subsets of CIFAR-10 " +#~ "on each client? How about adding " +#~ "more clients?" 
#~ msgstr "" +#~ "Le code source complet de cet " +#~ "exemple : `PyTorch : From Centralized" +#~ " To Federated (Code) " +#~ "`_. Notre " +#~ "exemple est, bien sûr, un peu trop" +#~ " simplifié parce que les deux clients" +#~ " chargent exactement le même ensemble " +#~ "de données, ce qui n'est pas " +#~ "réaliste. Tu es maintenant prêt à " +#~ "explorer davantage ce sujet. Pourquoi ne" +#~ " pas utiliser différents sous-ensembles " +#~ "de CIFAR-10 sur chaque client ? " +#~ "Pourquoi ne pas ajouter d'autres clients" +#~ " ?" #~ msgid "" -#~ "Flower Docker images, by default, run" -#~ " with a non-root user " -#~ "(username/groupname: ``app``, UID/GID: ``49999``)." -#~ " Using root user is not recommended" -#~ " unless it is necessary for specific" -#~ " tasks during the build process. " -#~ "Always make sure to run the " -#~ "container as a non-root user in" -#~ " production to maintain security best " -#~ "practices." +#~ "To help you start and manage all" +#~ " of the concurrently executing training " +#~ "runs, Flower offers one additional " +#~ "long-running server-side service called " +#~ "**SuperExec**. When you type ``flwr " +#~ "run`` to start a new training run," +#~ " the ``flwr`` CLI bundles your local" +#~ " project (mainly your ``ServerApp`` and " +#~ "``ClientApp``) and sends it to the " +#~ "**SuperExec**. The **SuperExec** will then " +#~ "take care of starting and managing " +#~ "your ``ServerApp``, which in turn " +#~ "selects SuperNodes to execute your " +#~ "``ClientApp``." #~ msgstr "" -#~ msgid "**Run a container with root user privileges**" +#~ msgid "" +#~ "This architecture allows many users to" +#~ " (concurrently) run their projects on " +#~ "the same federation, simply by typing" +#~ " ``flwr run`` on their local " +#~ "developer machine." 
#~ msgstr "" -#~ msgid "**Run the build process with root user privileges**" +#~ msgid "Flower Deployment Engine with SuperExec" #~ msgstr "" -#~ msgid "Using a different Flower version" +#~ msgid "The SuperExec service for managing concurrent training runs in Flower." #~ msgstr "" -#~ msgid "Pinning a Docker image to a specific version" +#~ msgid "FED Template" +#~ msgstr "Modèle FED" + +#~ msgid "Table of Contents" +#~ msgstr "Table des matières" + +#~ msgid "[Table of Contents](#table-of-contents)" +#~ msgstr "[Table des matières](#table-of-contents)" + +#~ msgid "[Summary](#summary)" +#~ msgstr "[Résumé](#résumé)" + +#~ msgid "[Motivation](#motivation)" +#~ msgstr "[Motivation](#motivation)" + +#~ msgid "[Goals](#goals)" +#~ msgstr "[Buts](#buts)" + +#~ msgid "[Non-Goals](#non-goals)" +#~ msgstr "[Non-objectifs](#non-objectifs)" + +#~ msgid "[Proposal](#proposal)" +#~ msgstr "[Proposition](#proposition)" + +#~ msgid "[Drawbacks](#drawbacks)" +#~ msgstr "[Inconvénients](#inconvénients)" + +#~ msgid "[Alternatives Considered](#alternatives-considered)" +#~ msgstr "[Alternatives envisagées](#alternatives-considered)" + +#~ msgid "[Appendix](#appendix)" +#~ msgstr "[Annexe](#appendix)" + +#~ msgid "Summary" +#~ msgstr "Résumé" + +#~ msgid "\\[TODO - sentence 1: summary of the problem\\]" +#~ msgstr "[TODO - phrase 1 : résumé du problème]" + +#~ msgid "\\[TODO - sentence 2: summary of the solution\\]" +#~ msgstr "[TODO - phrase 2 : résumé de la solution]" + +#~ msgid "Motivation" +#~ msgstr "Motivation" + +#~ msgid "\\[TODO\\]" +#~ msgstr "[TODO]" + +#~ msgid "Goals" +#~ msgstr "Objectifs" + +#~ msgid "Non-Goals" +#~ msgstr "Non-objectifs" + +#~ msgid "Proposal" +#~ msgstr "Proposition" + +#~ msgid "Drawbacks" +#~ msgstr "Inconvénients" + +#~ msgid "Alternatives Considered" +#~ msgstr "Alternatives envisagées" + +#~ msgid "\\[Alternative 1\\]" +#~ msgstr "[Alternative 1]" + +#~ msgid "\\[Alternative 2\\]" +#~ msgstr "[Alternative 2]" + +#~ msgid "Flower Enhancement 
Doc" +#~ msgstr "Doc d'amélioration de Flower" + +#~ msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#~ msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" + +#~ msgid "[Metadata](#metadata)" +#~ msgstr "[Métadonnées](#métadonnées)" + +#~ msgid "[Workflow](#workflow)" +#~ msgstr "[Workflow](#workflow)" + +#~ msgid "[GitHub Issues](#github-issues)" +#~ msgstr "[GitHub Issues](#github-issues)" + +#~ msgid "[Google Docs](#google-docs)" +#~ msgstr "[Google Docs](#google-docs)" + +#~ msgid "A Flower Enhancement is a standardized development process to" #~ msgstr "" +#~ "Une amélioration de Flower est " +#~ "un processus de développement standardisé " +#~ "pour" -#~ msgid "" -#~ "It may happen that we update the" -#~ " images behind the tags. Such updates" -#~ " usually include security updates of " -#~ "system dependencies that should not " -#~ "change the functionality of Flower. " -#~ "However, if you want to ensure " -#~ "that you always use the same " -#~ "image, you can specify the hash of" -#~ " the image instead of the tag." 
+#~ msgid "provide a common structure for proposing larger changes" #~ msgstr "" +#~ "fournir une structure commune pour " +#~ "proposer des changements plus importants" -#~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``superlink:1.8.0`` tag:" +#~ msgid "ensure that the motivation for a change is clear" +#~ msgstr "s'assurer que la motivation du changement est claire" + +#~ msgid "persist project information in a version control system" #~ msgstr "" +#~ "conserver les informations sur le projet" +#~ " dans un système de contrôle des " +#~ "versions" -#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgid "document the motivation for impactful user-facing changes" #~ msgstr "" +#~ "documenter la motivation des changements " +#~ "qui ont un impact sur l'utilisateur" + +#~ msgid "reserve GitHub issues for tracking work in flight" +#~ msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" #~ msgid "" -#~ "To set a variable inside a Docker" -#~ " container, you can use the ``-e " -#~ "=`` flag." 
+#~ "ensure community participants can successfully" +#~ " drive changes to completion across " +#~ "one or more releases while stakeholders" +#~ " are adequately represented throughout the" +#~ " process" #~ msgstr "" +#~ "s'assurer que les participants de la " +#~ "communauté peuvent mener à bien les " +#~ "changements dans le cadre d'une ou " +#~ "plusieurs versions et que les parties" +#~ " prenantes sont représentées de manière " +#~ "adéquate tout au long du processus" + +#~ msgid "Hence, an Enhancement Doc combines aspects of" +#~ msgstr "Par conséquent, un document d'amélioration combine des aspects de" + +#~ msgid "a feature, and effort-tracking document" +#~ msgstr "une caractéristique, et un document de suivi des efforts" + +#~ msgid "a product requirements document" +#~ msgstr "un document sur les exigences du produit" + +#~ msgid "a design document" +#~ msgstr "un document de conception" #~ msgid "" -#~ "This approach consists of two seprate" -#~ " phases: clipping of the updates and" -#~ " adding noise to the aggregated " -#~ "model. For the clipping phase, Flower" -#~ " framework has made it possible to" -#~ " decide whether to perform clipping " -#~ "on the server side or the client" -#~ " side." +#~ "into one file, which is created " +#~ "incrementally in collaboration with the " +#~ "community." #~ msgstr "" +#~ "en un seul fichier, qui est créé" +#~ " progressivement en collaboration avec la" +#~ " communauté." -#~ msgid ":py:obj:`flwr.client `\\" +#~ msgid "" +#~ "For far-fetching changes or features " +#~ "proposed to Flower, an abstraction " +#~ "beyond a single GitHub issue or " +#~ "pull request is required to understand" +#~ " and communicate upcoming changes to " +#~ "the project." 
+#~ msgstr "" +#~ "Pour les changements lointains ou les" +#~ " fonctionnalités proposées à Flower, une" +#~ " abstraction au-delà d'une simple " +#~ "question GitHub ou d'une demande de " +#~ "tirage est nécessaire pour comprendre et" +#~ " communiquer les changements à venir " +#~ "dans le projet." + +#~ msgid "" +#~ "The purpose of this process is to" +#~ " reduce the amount of \"tribal " +#~ "knowledge\" in our community. By moving" +#~ " decisions from Slack threads, video " +#~ "calls, and hallway conversations into a" +#~ " well-tracked artifact, this process " +#~ "aims to enhance communication and " +#~ "discoverability." +#~ msgstr "" +#~ "L'objectif de ce processus est de " +#~ "réduire la quantité de \"connaissances " +#~ "tribales\" dans notre communauté. En " +#~ "déplaçant les décisions des fils de " +#~ "discussion Slack, des appels vidéo et" +#~ " des conversations de couloir vers un" +#~ " artefact bien suivi, ce processus " +#~ "vise à améliorer la communication et " +#~ "la découvrabilité." + +#~ msgid "" +#~ "Roughly any larger, user-facing " +#~ "enhancement should follow the Enhancement " +#~ "process. If an enhancement would be " +#~ "described in either written or verbal" +#~ " communication to anyone besides the " +#~ "author or developer, then consider " +#~ "creating an Enhancement Doc." +#~ msgstr "" +#~ "Si une amélioration doit être décrite" +#~ " par écrit ou verbalement à quelqu'un" +#~ " d'autre que l'auteur ou le " +#~ "développeur, il faut envisager de créer" +#~ " un document d'amélioration." + +#~ msgid "" +#~ "Similarly, any technical effort (refactoring," +#~ " major architectural change) that will " +#~ "impact a large section of the " +#~ "development community should also be " +#~ "communicated widely. The Enhancement process" +#~ " is suited for this even if it" +#~ " will have zero impact on the " +#~ "typical user or operator." 
+#~ msgstr "" +#~ "De même, tout effort technique " +#~ "(refactorisation, changement architectural majeur)" +#~ " qui aura un impact sur une " +#~ "grande partie de la communauté de " +#~ "développement doit également être communiqué" +#~ " à grande échelle. Le processus " +#~ "d'amélioration est adapté à cela, même" +#~ " s'il n'aura aucun impact sur " +#~ "l'utilisateur ou l'opérateur type." + +#~ msgid "" +#~ "For small changes and additions, going" +#~ " through the Enhancement process would " +#~ "be time-consuming and unnecessary. This" +#~ " includes, for example, adding new " +#~ "Federated Learning algorithms, as these " +#~ "only add features without changing how" +#~ " Flower works or is used." +#~ msgstr "" +#~ "Pour les petits changements et ajouts," +#~ " passer par le processus d'amélioration " +#~ "prendrait beaucoup de temps et serait" +#~ " inutile. Cela inclut, par exemple, " +#~ "l'ajout de nouveaux algorithmes " +#~ "d'apprentissage fédéré, car ceux-ci ne" +#~ " font qu'ajouter des fonctionnalités sans" +#~ " changer le fonctionnement ou l'utilisation" +#~ " de Flower." + +#~ msgid "" +#~ "Enhancements are different from feature " +#~ "requests, as they are already providing" +#~ " a laid-out path for implementation" +#~ " and are championed by members of " +#~ "the community." +#~ msgstr "" +#~ "Les améliorations sont différentes des " +#~ "demandes de fonctionnalités, car elles " +#~ "fournissent déjà un chemin tracé pour" +#~ " la mise en œuvre et sont " +#~ "défendues par les membres de la " +#~ "communauté." + +#~ msgid "" +#~ "An Enhancement is captured in a " +#~ "Markdown file that follows a defined " +#~ "template and a workflow to review " +#~ "and store enhancement docs for reference" +#~ " — the Enhancement Doc." 
+#~ msgstr "" +#~ "Une amélioration est capturée dans un" +#~ " fichier Markdown qui suit un modèle" +#~ " défini et un flux de travail " +#~ "pour examiner et stocker les documents" +#~ " d'amélioration pour référence - le " +#~ "Doc d'amélioration." + +#~ msgid "Enhancement Doc Template" +#~ msgstr "Modèle de document d'amélioration" + +#~ msgid "" +#~ "Each enhancement doc is provided as " +#~ "a Markdown file having the following " +#~ "structure" #~ msgstr "" +#~ "Chaque document d'amélioration est fourni " +#~ "sous la forme d'un fichier Markdown " +#~ "ayant la structure suivante" -#~ msgid ":py:obj:`flwr.common `\\" +#~ msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" #~ msgstr "" +#~ "Métadonnées (comme [décrit ci-" +#~ "dessous](#metadata) sous la forme d'un " +#~ "préambule YAML)" -#~ msgid ":py:obj:`flwr.server `\\" -#~ msgstr "" +#~ msgid "Title (same as in metadata)" +#~ msgstr "Titre (le même que dans les métadonnées)" -#~ msgid ":py:obj:`flwr.simulation `\\" -#~ msgstr "" +#~ msgid "Table of Contents (if needed)" +#~ msgstr "Table des matières (si nécessaire)" -#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" -#~ msgstr "" +#~ msgid "Notes/Constraints/Caveats (optional)" +#~ msgstr "Notes/Contraintes/Cavats (facultatif)" -#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" -#~ msgstr "serveur.stratégie.Stratégie" +#~ msgid "Design Details (optional)" +#~ msgstr "Détails de la conception (facultatif)" -#~ msgid ":py:obj:`Context `\\ \\(state\\)" -#~ msgstr "" +#~ msgid "Graduation Criteria" +#~ msgstr "Critères d'obtention du diplôme" -#~ msgid "State of your run." -#~ msgstr "" +#~ msgid "Upgrade/Downgrade Strategy (if applicable)" +#~ msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" -#~ msgid "Metrics record." -#~ msgstr "" +#~ msgid "As a reference, this document follows the above structure." +#~ msgstr "À titre de référence, ce document suit la structure ci-dessus." 
#~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:class:`int` | " -#~ ":py:class:`float` | :py:class:`str` | " -#~ ":py:class:`bytes` | :py:class:`bool` | " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" +#~ "**fed-number** (Required) The `fed-" +#~ "number` of the last Flower Enhancement" +#~ " Doc + 1. With this number, it" +#~ " becomes easy to reference other " +#~ "proposals." #~ msgstr "" +#~ "**fed-number** (Obligatoire) Le `fed-number` du" +#~ " dernier document d'amélioration de " +#~ "Flower + 1. Avec ce numéro, il " +#~ "devient facile de faire référence à " +#~ "d'autres propositions." -#~ msgid "Remove all items from R." -#~ msgstr "" +#~ msgid "**title** (Required) The title of the proposal in plain language." +#~ msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." -#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgid "" +#~ "**status** (Required) The current status " +#~ "of the proposal. See [workflow](#workflow) " +#~ "for the possible states." #~ msgstr "" +#~ "**status** (obligatoire) L'état actuel de " +#~ "la proposition. Voir [workflow](#workflow) " +#~ "pour les états possibles." -#~ msgid "d defaults to None." +#~ msgid "" +#~ "**authors** (Required) A list of authors" +#~ " of the proposal. This is simply " +#~ "the GitHub ID." #~ msgstr "" +#~ "**authors** (Obligatoire) Une liste des " +#~ "auteurs de la proposition, il s'agit " +#~ "simplement de l'identifiant GitHub." -#~ msgid "Update R from dict/iterable E and F." +#~ msgid "" +#~ "**creation-date** (Required) The date " +#~ "that the proposal was first submitted" +#~ " in a PR." 
#~ msgstr "" +#~ "**creation-date** (Obligatoire) Date à " +#~ "laquelle la proposition a été soumise" +#~ " pour la première fois dans un " +#~ "PR." #~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_ENTER " -#~ "`\\" +#~ "**last-updated** (Optional) The date " +#~ "that the proposal was last changed " +#~ "significantly." #~ msgstr "" +#~ "**last-updated** (Facultatif) La" +#~ " date à laquelle la proposition a " +#~ "été modifiée de manière significative " +#~ "pour la dernière fois." #~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_LEAVE " -#~ "`\\" +#~ "**see-also** (Optional) A list of " +#~ "other proposals that are relevant to " +#~ "this one." #~ msgstr "" +#~ "**see-also** (Facultatif) Une liste " +#~ "d'autres propositions qui sont pertinentes " +#~ "par rapport à celle-ci." -#~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_ENTER " -#~ "`\\" +#~ msgid "**replaces** (Optional) A list of proposals that this one replaces." #~ msgstr "" +#~ "**replaces** (Facultatif) Une liste de " +#~ "propositions que celle-ci remplace." #~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_LEAVE " -#~ "`\\" +#~ "**superseded-by** (Optional) A list of" +#~ " proposals that this one supersedes." #~ msgstr "" +#~ "**superseded-by** (Facultatif) Une liste " +#~ "de propositions que celle-ci remplace." -#~ msgid ":py:obj:`DRIVER_CONNECT `\\" -#~ msgstr "" +#~ msgid "Workflow" +#~ msgstr "Flux de travail" -#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" -#~ msgstr "" +#~ msgid "" +#~ "The idea forming the enhancement should" +#~ " already have been discussed or " +#~ "pitched in the community. As such, " +#~ "it needs a champion, usually the " +#~ "author, who shepherds the enhancement. " +#~ "This person also has to find " +#~ "committers to Flower willing to review" +#~ " the proposal." +#~ msgstr "" +#~ "L'idée à l'origine de l'amélioration " +#~ "doit déjà avoir fait l'objet d'une " +#~ "discussion ou d'une présentation au sein" +#~ " de la communauté. 
À ce titre, " +#~ "elle a besoin d'un champion, " +#~ "généralement l'auteur, qui se charge de" +#~ " l'amélioration. Cette personne doit " +#~ "également trouver des committers de " +#~ "Flower prêts à examiner la proposition." + +#~ msgid "" +#~ "New enhancements are checked in with " +#~ "a file name in the form of " +#~ "`NNNN-YYYYMMDD-enhancement-title.md`, with " +#~ "`NNNN` being the Flower Enhancement Doc" +#~ " number, to `enhancements`. All " +#~ "enhancements start in `provisional` state " +#~ "as part of a pull request. " +#~ "Discussions are done as part of " +#~ "the pull request review." +#~ msgstr "" +#~ "Les nouvelles améliorations sont enregistrées" +#~ " avec un nom de fichier de la" +#~ " forme `NNNN-YYYYMMDD-enhancement-title.md`," +#~ " `NNNN` étant le numéro du document" +#~ " d'amélioration de Flower, dans " +#~ "`enhancements`. Toutes les améliorations " +#~ "commencent à l'état `provisional` dans " +#~ "le cadre d'une demande d'extraction. Les" +#~ " discussions sont effectuées dans le " +#~ "cadre de l'examen de la demande " +#~ "d'extraction." + +#~ msgid "" +#~ "Once an enhancement has been reviewed" +#~ " and approved, its status is changed" +#~ " to `implementable`. The actual " +#~ "implementation is then done in separate" +#~ " pull requests. These pull requests " +#~ "should mention the respective enhancement " +#~ "as part of their description. After " +#~ "the implementation is done, the proposal" +#~ " status is changed to `implemented`." +#~ msgstr "" +#~ "Une fois qu'une amélioration a été " +#~ "examinée et approuvée, son statut passe" +#~ " à `implementable`. L'implémentation réelle " +#~ "est alors réalisée dans des demandes " +#~ "d'extraction séparées. Ces demandes d'extraction" +#~ " doivent mentionner l'amélioration concernée " +#~ "dans leur description. Une fois " +#~ "l'implémentation réalisée, le statut de " +#~ "la proposition passe à `implemented`." 
+ +#~ msgid "" +#~ "Under certain conditions, other states " +#~ "are possible. An Enhancement has the " +#~ "following states:" +#~ msgstr "" +#~ "Sous certaines conditions, d'autres états " +#~ "sont possibles. Une amélioration a les" +#~ " états suivants :" + +#~ msgid "" +#~ "`provisional`: The enhancement has been " +#~ "proposed and is actively being defined." +#~ " This is the starting state while " +#~ "the proposal is being fleshed out " +#~ "and actively defined and discussed." +#~ msgstr "" +#~ "`provisoire` : L'amélioration a été " +#~ "proposée et est en cours de " +#~ "définition. C'est l'état de départ " +#~ "pendant que la proposition est étoffée" +#~ " et activement définie et discutée." + +#~ msgid "`implementable`: The enhancement has been reviewed and approved." +#~ msgstr "`implementable` : L'amélioration a été examinée et approuvée." + +#~ msgid "" +#~ "`implemented`: The enhancement has been " +#~ "implemented and is no longer actively" +#~ " changed." +#~ msgstr "" +#~ "`implemented` : L'amélioration a été " +#~ "mise en œuvre et n'est plus " +#~ "activement modifiée." + +#~ msgid "" +#~ "`deferred`: The enhancement is proposed " +#~ "but not actively being worked on." +#~ msgstr "" +#~ "`deferred` : L'amélioration est proposée " +#~ "mais n'est pas activement travaillée." + +#~ msgid "" +#~ "`rejected`: The authors and reviewers " +#~ "have decided that this enhancement is" +#~ " not moving forward." +#~ msgstr "" +#~ "`rejeté` : Les auteurs et les " +#~ "réviseurs ont décidé que cette " +#~ "amélioration n'allait pas de l'avant." + +#~ msgid "`withdrawn`: The authors have withdrawn the enhancement." +#~ msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." + +#~ msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#~ msgstr "" +#~ "`replaced` : L'amélioration a été " +#~ "remplacée par une nouvelle amélioration." 
+ +#~ msgid "" +#~ "Adding an additional process to the " +#~ "ones already provided by GitHub (Issues" +#~ " and Pull Requests) adds more " +#~ "complexity and can be a barrier " +#~ "for potential first-time contributors." +#~ msgstr "" +#~ "L'ajout d'un processus supplémentaire à " +#~ "ceux déjà fournis par GitHub (Issues " +#~ "et Pull Requests) ajoute plus de " +#~ "complexité et peut constituer un " +#~ "obstacle pour les éventuels nouveaux " +#~ "contributeurs." + +#~ msgid "" +#~ "Expanding the proposal template beyond " +#~ "the single-sentence description currently " +#~ "required in the features issue template" +#~ " may be a heavy burden for " +#~ "non-native English speakers." +#~ msgstr "" +#~ "Élargir le modèle de proposition au-" +#~ "delà de la description d'une seule " +#~ "phrase actuellement requise dans le " +#~ "modèle de questions sur les " +#~ "caractéristiques peut constituer une lourde" +#~ " charge pour les personnes dont " +#~ "l'anglais n'est pas la langue " +#~ "maternelle." + +#~ msgid "GitHub Issues" +#~ msgstr "Questions sur GitHub" + +#~ msgid "" +#~ "Using GitHub Issues for these kinds " +#~ "of enhancements is doable. One could " +#~ "use, for example, tags, to differentiate" +#~ " and filter them from other issues." +#~ " The main issue is in discussing " +#~ "and reviewing an enhancement: GitHub " +#~ "issues only have a single thread " +#~ "for comments. Enhancements usually have " +#~ "multiple threads of discussion at the" +#~ " same time for various parts of " +#~ "the doc. Managing these multiple " +#~ "discussions can be confusing when using" +#~ " GitHub Issues." +#~ msgstr "" +#~ "Il est possible d'utiliser GitHub Issues" +#~ " pour ce type d'améliorations. On " +#~ "pourrait utiliser, par exemple, des " +#~ "balises pour les différencier et les " +#~ "filtrer par rapport aux autres " +#~ "problèmes. 
Le principal problème concerne " +#~ "la discussion et la révision d'une " +#~ "amélioration : les GitHub Issues n'ont" +#~ " qu'un seul fil de discussion pour" +#~ " les commentaires. Les améliorations ont" +#~ " généralement plusieurs fils de discussion" +#~ " en même temps pour différentes " +#~ "parties de la documentation. La gestion" +#~ " de ces multiples discussions peut " +#~ "être déroutante lorsque l'on utilise " +#~ "GitHub Issues." + +#~ msgid "Google Docs" +#~ msgstr "Google Docs" + +#~ msgid "" +#~ "Google Docs allow for multiple threads" +#~ " of discussions. But as Google Docs" +#~ " are hosted outside the project, " +#~ "their discoverability by the community " +#~ "needs to be taken care of. A " +#~ "list of links to all proposals has" +#~ " to be managed and made available " +#~ "for the community. Compared to shipping" +#~ " proposals as part of Flower's " +#~ "repository, the potential for missing " +#~ "links is much higher." +#~ msgstr "" +#~ "Les Google Docs permettent de multiplier" +#~ " les fils de discussion. Mais comme" +#~ " les Google Docs sont hébergés en " +#~ "dehors du projet, il faut veiller " +#~ "à ce que la communauté puisse les" +#~ " découvrir. Une liste de liens vers" +#~ " toutes les propositions doit être " +#~ "gérée et mise à la disposition de" +#~ " la communauté. Par rapport à l'envoi" +#~ " de propositions dans le cadre du " +#~ "référentiel de Flower, le risque de " +#~ "liens manquants est beaucoup plus élevé." + +#~ msgid "FED - Flower Enhancement Doc" +#~ msgstr "FED - Doc pour l'amélioration des fleurs" + +#~ msgid "" +#~ "Along with model parameters, Flower can" +#~ " send configuration values to clients. " +#~ "Configuration values can be used for " +#~ "various purposes. They are, for example," +#~ " a popular way to control client-" +#~ "side hyperparameters from the server." +#~ msgstr "" +#~ "En plus des paramètres du modèle, " +#~ "Flower peut envoyer des valeurs de " +#~ "configuration aux clients. 
Les valeurs " +#~ "de configuration peuvent être utilisées " +#~ "à diverses fins. Elles constituent, par" +#~ " exemple, un moyen populaire de " +#~ "contrôler les hyperparamètres côté client " +#~ "à partir du serveur." + +#~ msgid "" +#~ "Configuration values are represented as " +#~ "a dictionary with ``str`` keys and " +#~ "values of type ``bool``, ``bytes``, " +#~ "``double`` (64-bit precision float), ``int``," +#~ " or ``str`` (or equivalent types in" +#~ " different languages). Here is an " +#~ "example of a configuration dictionary in" +#~ " Python:" +#~ msgstr "" +#~ "Les valeurs de configuration sont " +#~ "représentées sous forme de dictionnaire " +#~ "avec des clés `str`` et des " +#~ "valeurs de type `bool`, `bytes`, " +#~ "`double` (float de précision 64 bits)," +#~ " `int`, ou `str` (ou des types " +#~ "équivalents dans d'autres langages). Voici " +#~ "un exemple de dictionnaire de " +#~ "configuration en Python :" + +#~ msgid "" +#~ "One can, for example, convert a " +#~ "list of floating-point numbers to " +#~ "a JSON string, then send the JSON" +#~ " string using the configuration dictionary," +#~ " and then convert the JSON string " +#~ "back to a list of floating-point" +#~ " numbers on the client." +#~ msgstr "" +#~ "On peut, par exemple, convertir une " +#~ "liste de nombres à virgule flottante " +#~ "en une chaîne JSON, puis envoyer " +#~ "la chaîne JSON à l'aide du " +#~ "dictionnaire de configuration, et enfin " +#~ "reconvertir la chaîne JSON en une " +#~ "liste de nombres à virgule flottante " +#~ "sur le client." + +#~ msgid "" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like ``FedAvg``. " +#~ "Built-in strategies support so-called " +#~ "configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. 
It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." +#~ msgstr "" +#~ "La façon la plus simple d'envoyer " +#~ "des valeurs de configuration aux clients" +#~ " est d'utiliser une stratégie intégrée " +#~ "comme :code:`FedAvg`. Les stratégies intégrées" +#~ " prennent en charge ce que l'on " +#~ "appelle les fonctions de configuration. " +#~ "Une fonction de configuration est une" +#~ " fonction que la stratégie intégrée " +#~ "appelle pour obtenir le dictionnaire de" +#~ " configuration pour le tour en cours." +#~ " Elle transmet ensuite le dictionnaire " +#~ "de configuration à tous les clients " +#~ "sélectionnés au cours de ce tour." + +#~ msgid "" +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter ``on_fit_config_fn``:" +#~ msgstr "" +#~ "Pour que les stratégies intégrées " +#~ "utilisent cette fonction, nous pouvons " +#~ "la passer à ``FedAvg`` lors de " +#~ "l'initialisation en utilisant le paramètre " +#~ ":code:`on_fit_config_fn` :" + +#~ msgid "" +#~ "One the client side, we receive " +#~ "the configuration dictionary in ``fit``:" +#~ msgstr "" +#~ "Côté client, nous recevons le " +#~ "dictionnaire de configuration dans ``fit`` " +#~ ":" #~ msgid "" -#~ ":py:obj:`START_DRIVER_ENTER " -#~ "`\\" -#~ msgstr "" +#~ "There is also an `on_evaluate_config_fn` " +#~ "to configure evaluation, which works the" +#~ " same way. They are separate " +#~ "functions because one might want to " +#~ "send different configuration values to " +#~ "`evaluate` (for example, to use a " +#~ "different batch size)." +#~ msgstr "" +#~ "Il existe également une fonction " +#~ "`on_evaluate_config_fn` pour configurer " +#~ "l'évaluation, qui fonctionne de la même" +#~ " manière. 
Ce sont des fonctions " +#~ "séparées car on peut vouloir envoyer " +#~ "différentes valeurs de configuration à " +#~ "`evaluate` (par exemple, pour utiliser " +#~ "une taille de lot différente)." + +#~ msgid "" +#~ "The built-in strategies call this " +#~ "function every round (that is, every " +#~ "time `Strategy.configure_fit` or " +#~ "`Strategy.configure_evaluate` runs). Calling " +#~ "`on_evaluate_config_fn` every round allows us" +#~ " to vary/change the config dict over" +#~ " consecutive rounds. If we wanted to" +#~ " implement a hyperparameter schedule, for" +#~ " example, to increase the number of" +#~ " local epochs during later rounds, we" +#~ " could do the following:" +#~ msgstr "" +#~ "Les stratégies intégrées appellent cette " +#~ "fonction à chaque tour (c'est-à-dire à" +#~ " chaque fois que `Strategy.configure_fit` " +#~ "ou `Strategy.configure_evaluate` s'exécute). Appeler" +#~ " `on_evaluate_config_fn` à chaque tour nous" +#~ " permet de varier/changer le dict de" +#~ " config au cours de tours " +#~ "consécutifs. Si nous voulions mettre en" +#~ " place un calendrier d'hyperparamètres, par" +#~ " exemple, pour augmenter le nombre " +#~ "d'époques locales au cours des derniers" +#~ " tours, nous pourrions faire ce qui" +#~ " suit :" + +#~ msgid "The ``FedAvg`` strategy will call this function *every round*." +#~ msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." + +#~ msgid "Configuring individual clients" +#~ msgstr "Configuration des clients individuels" + +#~ msgid "" +#~ "In some cases, it is necessary to" +#~ " send different configuration values to " +#~ "different clients." +#~ msgstr "" +#~ "Dans certains cas, il est nécessaire " +#~ "d'envoyer des valeurs de configuration " +#~ "différentes à des clients différents." + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. 
" +#~ "Here's a nonsensical example that " +#~ "customizes ``FedAvg`` by adding a custom" +#~ " ``\"hello\": \"world\"`` configuration key/value" +#~ " pair to the config dict of a" +#~ " *single client* (only the first " +#~ "client in the list, the other " +#~ "clients in this round to not " +#~ "receive this \"special\" config value):" +#~ msgstr "" +#~ "Ceci peut être réalisé en personnalisant" +#~ " une stratégie existante ou en " +#~ "`mettant en œuvre une stratégie " +#~ "personnalisée à partir de zéro " +#~ "`_. Voici un exemple absurde" +#~ " qui personnalise :code:`FedAvg` en " +#~ "ajoutant une paire clé/valeur de " +#~ "configuration personnalisée ``\"hello\" : " +#~ "\"world\"`` au config dict d'un *seul" +#~ " client* (uniquement le premier client " +#~ "de la liste, les autres clients de" +#~ " cette série ne recevant pas cette" +#~ " valeur de configuration \"spéciale\") :" #~ msgid "" -#~ ":py:obj:`START_DRIVER_LEAVE " -#~ "`\\" +#~ "containing relevant information including: log" +#~ " message level (e.g. ``INFO``, ``DEBUG``)," +#~ " a timestamp, the line where the " +#~ "logging took place from, as well " +#~ "as the log message itself. In this" +#~ " way, the logger would typically " +#~ "display information on your terminal as" +#~ " follows:" #~ msgstr "" #~ msgid "" -#~ "An identifier that can be used " -#~ "when loading a particular data partition" -#~ " for a ClientApp. Making use of " -#~ "this identifier is more relevant when" -#~ " conducting simulations." +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ "``fl.server.start_server``) and when using the" +#~ " ``VirtualClientEngine`` (i.e. when you do" +#~ " ``fl.simulation.start_simulation``). In some " +#~ "situations you might want to save " +#~ "this log to disk. 
You can do " +#~ "so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" #~ msgstr "" -#~ msgid ":py:obj:`partition_id `\\" +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to ``log.txt``. This file will be" +#~ " created in the same directory as " +#~ "were you are running the code " +#~ "from. If we inspect we see the " +#~ "log above is also recorded but " +#~ "prefixing with ``identifier`` each line:" #~ msgstr "" +#~ "Avec ce qui précède, Flower enregistrera" +#~ " le log que vous voyez sur " +#~ "votre terminal dans :code:`log.txt`. Ce " +#~ "fichier sera créé dans le répertoire " +#~ "depuis lequel le code est exécuté. " +#~ "Si nous inspectons nous voyons que " +#~ "le log ci-dessous est également " +#~ "enregistré mais préfixé avec " +#~ ":code:`identifier` sur chaque ligne :" + +#~ msgid "" +#~ "The ``fl.common.logger.configure`` function, also" +#~ " allows specifying a host to which" +#~ " logs can be pushed (via ``POST``)" +#~ " through a native Python " +#~ "``logging.handler.HTTPHandler``. This is a " +#~ "particularly useful feature in ``gRPC``-based" +#~ " Federated Learning workloads where " +#~ "otherwise gathering logs from all " +#~ "entities (i.e. the server and the " +#~ "clients) might be cumbersome. Note that" +#~ " in Flower simulation, the server " +#~ "automatically displays all logs. You can" +#~ " still specify a ``HTTPHandler`` should " +#~ "you wish to backup or analyze the" +#~ " logs somewhere else." +#~ msgstr "" + +#~ msgid "Monitor simulation" +#~ msgstr "Simulation de moniteur" -#~ msgid "An identifier telling which data partition a ClientApp should use." -#~ msgstr "" +#~ msgid "" +#~ "Flower allows you to monitor system " +#~ "resources while running your simulation. 
" +#~ "Moreover, the Flower simulation engine " +#~ "is powerful and enables you to " +#~ "decide how to allocate resources per " +#~ "client manner and constrain the total" +#~ " usage. Insights from resource consumption" +#~ " can help you make smarter decisions" +#~ " and speed up the execution time." +#~ msgstr "" +#~ "Flower te permet de surveiller les " +#~ "ressources du système pendant l'exécution " +#~ "de ta simulation. De plus, le " +#~ "moteur de simulation de Flower est " +#~ "puissant et te permet de décider " +#~ "comment allouer les ressources par " +#~ "manière de client et de limiter " +#~ "l'utilisation totale. Les informations sur " +#~ "la consommation des ressources peuvent " +#~ "t'aider à prendre des décisions plus " +#~ "intelligentes et à accélérer le temps" +#~ " d'exécution." + +#~ msgid "" +#~ "The specific instructions assume you are" +#~ " using macOS and have the `Homebrew" +#~ " `_ package manager installed." +#~ msgstr "" +#~ "Les instructions spécifiques supposent que " +#~ "tu utilises macOS et que le " +#~ "gestionnaire de paquets `Homebrew " +#~ "`_ est installé." + +#~ msgid "Downloads" +#~ msgstr "Téléchargements" + +#~ msgid "" +#~ "`Prometheus `_ is used " +#~ "for data collection, while `Grafana " +#~ "`_ will enable you to" +#~ " visualize the collected data. They " +#~ "are both well integrated with `Ray " +#~ "`_ which Flower uses " +#~ "under the hood." +#~ msgstr "" +#~ "`Prometheus `_ est utilisé" +#~ " pour la collecte de données, tandis" +#~ " que `Grafana `_ te " +#~ "permettra de visualiser les données " +#~ "collectées. Ils sont tous deux bien " +#~ "intégrés à `Ray `_ que" +#~ " Flower utilise sous le capot." + +#~ msgid "" +#~ "Overwrite the configuration files (depending" +#~ " on your device, it might be " +#~ "installed on a different path)." +#~ msgstr "" +#~ "Écrase les fichiers de configuration " +#~ "(selon ton appareil, il se peut " +#~ "qu'il soit installé sur un chemin " +#~ "différent)." 
+ +#~ msgid "If you are on an M1 Mac, it should be:" +#~ msgstr "Si tu es sur un Mac M1, il devrait l'être :" + +#~ msgid "On the previous generation Intel Mac devices, it should be:" +#~ msgstr "" +#~ "Sur les appareils Mac Intel de la" +#~ " génération précédente, ce devrait être " +#~ "le cas :" + +#~ msgid "" +#~ "Open the respective configuration files " +#~ "and change them. Depending on your " +#~ "device, use one of the two " +#~ "following commands:" +#~ msgstr "" +#~ "Ouvre les fichiers de configuration " +#~ "respectifs et modifie-les. Selon ton " +#~ "appareil, utilise l'une des deux " +#~ "commandes suivantes :" + +#~ msgid "" +#~ "and then delete all the text in" +#~ " the file and paste a new " +#~ "Prometheus config you see below. You " +#~ "may adjust the time intervals to " +#~ "your requirements:" +#~ msgstr "" +#~ "puis supprime tout le texte du " +#~ "fichier et colle une nouvelle " +#~ "configuration Prometheus que tu vois " +#~ "ci-dessous. Tu peux adapter les " +#~ "intervalles de temps à tes besoins " +#~ ":" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:class:`int` | " -#~ ":py:class:`float` | :py:class:`~typing.List`\\ " -#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " -#~ "[:py:class:`float`]]" +#~ "Now after you have edited the " +#~ "Prometheus configuration, do the same " +#~ "with the Grafana configuration files. " +#~ "Open those using one of the " +#~ "following commands as before:" #~ msgstr "" +#~ "Maintenant, après avoir édité la " +#~ "configuration de Prometheus, fais de " +#~ "même avec les fichiers de configuration" +#~ " de Grafana. Ouvre ces derniers à " +#~ "l'aide de l'une des commandes suivantes," +#~ " comme précédemment :" -#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgid "" +#~ "Your terminal editor should open and " +#~ "allow you to apply the following " +#~ "configuration as before." 
#~ msgstr "" +#~ "Ton éditeur de terminal devrait s'ouvrir" +#~ " et te permettre d'appliquer la " +#~ "configuration suivante comme précédemment." #~ msgid "" -#~ "A dataclass storing named Arrays in " -#~ "order. This means that it holds " -#~ "entries as an OrderedDict[str, Array]. " -#~ "ParametersRecord objects can be viewed " -#~ "as an equivalent to PyTorch's " -#~ "state_dict, but holding serialised tensors " -#~ "instead." +#~ "Congratulations, you just downloaded all " +#~ "the necessary software needed for " +#~ "metrics tracking. Now, let’s start it." #~ msgstr "" +#~ "Félicitations, tu viens de télécharger " +#~ "tous les logiciels nécessaires au suivi" +#~ " des métriques, maintenant, démarrons-le." -#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#~ msgstr "" +#~ msgid "Tracking metrics" +#~ msgstr "Suivi des mesures" -#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgid "" +#~ "Before running your Flower simulation, " +#~ "you have to start the monitoring " +#~ "tools you have just installed and " +#~ "configured." #~ msgstr "" +#~ "Avant de lancer ta simulation Flower," +#~ " tu dois démarrer les outils de " +#~ "surveillance que tu viens d'installer et" +#~ " de configurer." -#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgid "" +#~ "Please include the following argument in" +#~ " your Python code when starting a " +#~ "simulation." #~ msgstr "" - -#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." -#~ msgstr "flower-fleet-api" +#~ "Tu dois inclure l'argument suivant dans" +#~ " ton code Python lorsque tu démarres" +#~ " une simulation." + +#~ msgid "Now, you are ready to start your workload." +#~ msgstr "Maintenant, tu es prêt à commencer ta charge de travail." 
+ +#~ msgid "" +#~ "Shortly after the simulation starts, you" +#~ " should see the following logs in " +#~ "your terminal:" +#~ msgstr "" +#~ "Peu de temps après le début de " +#~ "la simulation, tu devrais voir les " +#~ "journaux suivants dans ton terminal :" + +#~ msgid "You can look at everything at http://127.0.0.1:8265 ." +#~ msgstr "Tu peux tout regarder sur ``_ ." + +#~ msgid "" +#~ "It's a Ray Dashboard. You can " +#~ "navigate to Metrics (on the left " +#~ "panel, the lowest option)." +#~ msgstr "" +#~ "Il s'agit d'un tableau de bord " +#~ "Ray. Tu peux naviguer vers Metrics " +#~ "(sur le panneau de gauche, l'option " +#~ "la plus basse)." + +#~ msgid "" +#~ "Or alternatively, you can just see " +#~ "them in Grafana by clicking on the" +#~ " right-up corner, “View in Grafana”." +#~ " Please note that the Ray dashboard" +#~ " is only accessible during the " +#~ "simulation. After the simulation ends, " +#~ "you can only use Grafana to " +#~ "explore the metrics. You can start " +#~ "Grafana by going to " +#~ "``http://localhost:3000/``." +#~ msgstr "" +#~ "Ou alors, tu peux simplement les " +#~ "voir dans Grafana en cliquant sur " +#~ "le coin supérieur droit, \"View in " +#~ "Grafana\". Sache que le tableau de " +#~ "bord Ray n'est accessible que pendant" +#~ " la simulation. Une fois la " +#~ "simulation terminée, tu ne peux utiliser" +#~ " Grafana que pour explorer les " +#~ "métriques. Tu peux démarrer Grafana en" +#~ " te rendant sur `http://localhost:3000/``." + +#~ msgid "" +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port ``3000`` on your" +#~ " machine as long as they are " +#~ "running." +#~ msgstr "" +#~ "Après avoir terminé la visualisation, " +#~ "arrête Prometheus et Grafana. C'est " +#~ "important car sinon ils bloqueront, par" +#~ " exemple, le port :code:`3000` sur ta" +#~ " machine tant qu'ils seront en cours" +#~ " d'exécution." 
+ +#~ msgid "Resource allocation" +#~ msgstr "Allocation des ressources" + +#~ msgid "" +#~ "You must understand how the Ray " +#~ "library works to efficiently allocate " +#~ "system resources to simulation clients " +#~ "on your own." +#~ msgstr "" +#~ "Tu dois comprendre le fonctionnement de" +#~ " la bibliothèque Ray pour allouer " +#~ "efficacement les ressources du système " +#~ "aux clients de simulation de ton " +#~ "côté." + +#~ msgid "" +#~ "Initially, the simulation (which Ray " +#~ "handles under the hood) starts by " +#~ "default with all the available resources" +#~ " on the system, which it shares " +#~ "among the clients. It doesn't mean " +#~ "it divides it equally among all of" +#~ " them, nor that the model training" +#~ " happens at all of them " +#~ "simultaneously. You will learn more " +#~ "about that in the later part of" +#~ " this blog. You can check the " +#~ "system resources by running the " +#~ "following:" +#~ msgstr "" +#~ "Au départ, la simulation (que Ray " +#~ "gère sous le capot) démarre par " +#~ "défaut avec toutes les ressources " +#~ "disponibles sur le système, qu'elle " +#~ "partage entre les clients. Cela ne " +#~ "signifie pas qu'elle les divise de " +#~ "manière égale entre tous, ni que " +#~ "l'apprentissage du modèle se fait sur" +#~ " tous les clients simultanément. Tu " +#~ "en apprendras plus à ce sujet dans" +#~ " la suite de ce blog. Tu peux" +#~ " vérifier les ressources du système " +#~ "en exécutant ce qui suit :" + +#~ msgid "In Google Colab, the result you see might be similar to this:" +#~ msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" + +#~ msgid "" +#~ "However, you can overwrite the defaults." +#~ " When starting a simulation, do the" +#~ " following (you don't need to " +#~ "overwrite all of them):" +#~ msgstr "" +#~ "Cependant, tu peux écraser les valeurs" +#~ " par défaut. 
Lorsque tu démarres une" +#~ " simulation, fais ce qui suit (tu " +#~ "n'as pas besoin de les écraser " +#~ "toutes) :" + +#~ msgid "Let’s also specify the resource for a single client." +#~ msgstr "Spécifions également la ressource pour un seul client." + +#~ msgid "" +#~ "Now comes the crucial part. Ray " +#~ "will start a new client only when" +#~ " it has all the required resources" +#~ " (such that they run in parallel) " +#~ "when the resources allow." +#~ msgstr "" +#~ "Ray ne démarrera un nouveau client " +#~ "que lorsqu'il disposera de toutes les" +#~ " ressources nécessaires (de manière à " +#~ "ce qu'ils fonctionnent en parallèle) " +#~ "lorsque les ressources le permettront." + +#~ msgid "" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ "``client_num_gpus = 0.5`` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ "``client_num_gpus = 2``, the simulation " +#~ "wouldn't start (even if you had 2" +#~ " GPUs but decided to set 1 in" +#~ " ``ray_init_args``)." +#~ msgstr "" +#~ "Dans l'exemple ci-dessus, un seul " +#~ "client sera exécuté, donc tes clients" +#~ " ne fonctionneront pas simultanément. En" +#~ " définissant :code:`client_num_gpus = 0.5`, " +#~ "tu pourras exécuter deux clients et " +#~ "donc les faire fonctionner simultanément. " +#~ "Fais attention à ne pas demander " +#~ "plus de ressources que celles " +#~ "disponibles. Si tu as spécifié " +#~ ":code:`client_num_gpus = 2`, la simulation " +#~ "ne démarrera pas (même si tu as" +#~ " 2 GPU mais que tu as décidé" +#~ " d'en définir 1 dans " +#~ ":code:`ray_init_args`)." + +#~ msgid "Q: I don't see any metrics logged." +#~ msgstr "Q : Je ne vois aucune mesure enregistrée." + +#~ msgid "" +#~ "A: The timeframe might not be " +#~ "properly set. 
The setting is in " +#~ "the top right corner (\"Last 30 " +#~ "minutes\" by default). Please change the" +#~ " timeframe to reflect the period when" +#~ " the simulation was running." +#~ msgstr "" +#~ "R : Il se peut que le délai" +#~ " ne soit pas correctement défini. Le" +#~ " paramètre se trouve dans le coin " +#~ "supérieur droit (\"Dernières 30 minutes\" " +#~ "par défaut). Modifie le délai pour " +#~ "qu'il corresponde à la période pendant" +#~ " laquelle la simulation s'est déroulée." + +#~ msgid "" +#~ "Q: I see “Grafana server not " +#~ "detected. Please make sure the Grafana" +#~ " server is running and refresh this" +#~ " page” after going to the Metrics " +#~ "tab in Ray Dashboard." +#~ msgstr "" +#~ "Q : Je vois s'afficher \"Serveur " +#~ "Grafana non détecté. Vérifie que le " +#~ "serveur Grafana fonctionne et actualise " +#~ "cette page\" après avoir accédé à " +#~ "l'onglet Métriques dans Ray Dashboard." + +#~ msgid "" +#~ "A: You probably don't have Grafana " +#~ "running. Please check the running " +#~ "services" +#~ msgstr "" +#~ "R : Grafana n'est probablement pas " +#~ "en cours d'exécution. Vérifie les " +#~ "services en cours d'exécution" + +#~ msgid "" +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to http://127.0.0.1:8265." +#~ msgstr "" +#~ "Q : Je vois \"This site can't " +#~ "be reached\" quand je vais sur " +#~ "``_." + +#~ msgid "" +#~ "A: Either the simulation has already " +#~ "finished, or you still need to " +#~ "start Prometheus." +#~ msgstr "" +#~ "R : Soit la simulation est déjà" +#~ " terminée, soit tu dois encore " +#~ "démarrer Prometheus." 
+ +#~ msgid "Resources" +#~ msgstr "Ressources" #~ msgid "" -#~ ":py:obj:`LegacyContext `\\ " -#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ "Ray Dashboard: https://docs.ray.io/en/latest/ray-" +#~ "observability/getting-started.html" #~ msgstr "" +#~ "Tableau de bord Ray : " +#~ "``_" -#~ msgid "run\\_driver\\_api" -#~ msgstr "flower-driver-api" +#~ msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +#~ msgstr "" +#~ "Ray Metrics : ``_" -#~ msgid "run\\_fleet\\_api" +#~ msgid "" +#~ "The ``VirtualClientEngine`` schedules, launches " +#~ "and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the ``VirtualClientEngine`` are:" #~ msgstr "" #~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg+ configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to ``VirtualClientEngine``'s " +#~ "internals." #~ msgstr "" -#~ msgid "key shares." +#~ msgid "" +#~ "The ``VirtualClientEngine`` implements `virtual` " +#~ "clients using `Ray `_, an" +#~ " open-source framework for scalable " +#~ "Python workloads. In particular, Flower's " +#~ "``VirtualClientEngine`` makes use of `Actors" +#~ " `_ " +#~ "to spawn `virtual` clients and run " +#~ "their workload." 
#~ msgstr "" #~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the ``ray_init_args`` input argument " +#~ "to ``start_simulation`` which the VCE " +#~ "internally passes to Ray's ``ray.init`` " +#~ "command. For a complete list of " +#~ "settings you can configure check the " +#~ "`ray.init `_ documentation. " +#~ "Do not set ``ray_init_args`` if you " +#~ "want the VCE to use all your " +#~ "system's CPUs and GPUs." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_simulation `\\" -#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ "By default the ``VirtualClientEngine`` assigns" +#~ " a single CPU core (and nothing " +#~ "else) to each virtual client. This " +#~ "means that if your system has 10" +#~ " cores, that many virtual clients can" +#~ " be concurrently running." +#~ msgstr "" + +#~ msgid "``num_cpus`` indicates the number of CPU cores a client would get." #~ msgstr "" #~ msgid "" -#~ "'A dictionary, e.g {\"\": , " -#~ "\"\": } to configure a " -#~ "backend. Values supported in are" -#~ " those included by " -#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ "``num_gpus`` indicates the **ratio** of " +#~ "GPU memory a client gets assigned." #~ msgstr "" #~ msgid "" -#~ "When diabled, only INFO, WARNING and " -#~ "ERROR log messages will be shown. " -#~ "If enabled, DEBUG-level logs will " -#~ "be displayed." 
+#~ "While the ``client_resources`` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. The " +#~ "``VirtualClientEngine`` will schedule 100 jobs" +#~ " to run (each simulating a client " +#~ "sampled by the strategy) and then " +#~ "will execute them in a resource-" +#~ "aware manner in batches of 8." #~ msgstr "" #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "`str` argument called `cid`. It should" -#~ " return a single client instance of" -#~ " type Client. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not attempt to carry state " -#~ "over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset, hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ "Flower's ``VirtualClientEngine`` allows you to" +#~ " run FL simulations across multiple " +#~ "compute nodes. Before starting your " +#~ "multi-node simulation ensure that you:" #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Convolutional Neural " -#~ "Network on CIFAR10 using Flower and " -#~ "PyTorch." +#~ "Pass ``ray_init_args={\"address\"=\"auto\"}`` to " +#~ "`start_simulation `_ so the " +#~ "``VirtualClientEngine`` attaches to a running" +#~ " Ray instance." 
#~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à entraîner un réseau neuronal " -#~ "convolutif sur CIFAR10 à l'aide de " -#~ "Flower et PyTorch." #~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight-updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." +#~ "Start Ray on you head node: on " +#~ "the terminal type ``ray start --head``." +#~ " This command will print a few " +#~ "lines, one of which indicates how " +#~ "to attach other nodes to the head" +#~ " node." #~ msgstr "" -#~ "*Les clients* sont chargés de générer" -#~ " des mises à jour de poids " -#~ "individuelles pour le modèle en fonction" -#~ " de leurs ensembles de données " -#~ "locales. Ces mises à jour sont " -#~ "ensuite envoyées au *serveur* qui les" -#~ " agrège pour produire un meilleur " -#~ "modèle. Enfin, le *serveur* renvoie " -#~ "cette version améliorée du modèle à " -#~ "chaque *client*. Un cycle complet de " -#~ "mises à jour de poids s'appelle un" -#~ " *round*." #~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started. We first need to install " -#~ "Flower. You can do this by running" -#~ " :" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example ``ray start --address='192.168.1.132:6379'``" #~ msgstr "" -#~ "Maintenant que nous avons une idée " -#~ "générale de ce qui se passe, " -#~ "commençons. Nous devons d'abord installer " -#~ "Flower. 
Tu peux le faire en " -#~ "exécutant :" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead and install PyTorch and " -#~ "the **torchvision** library:" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " ``ray stop`` in each node's terminal" +#~ " (including the head node)." #~ msgstr "" -#~ "Puisque nous voulons utiliser PyTorch " -#~ "pour résoudre une tâche de vision " -#~ "par ordinateur, allons-y et installons " -#~ "PyTorch et la bibliothèque **torchvision** " -#~ ":" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Deep Learning with" -#~ " PyTorch " -#~ "`_." +#~ "User ``ray status`` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the ``VirtualClientEngine``." +#~ msgstr "" + +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the ``VirtualClientEngine`` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any ``ray " +#~ "start`` command (including when starting " +#~ "the head)" #~ msgstr "" -#~ "Maintenant que nous avons installé " -#~ "toutes nos dépendances, lançons une " -#~ "formation distribuée simple avec deux " -#~ "clients et un serveur. Notre procédure" -#~ " de formation et l'architecture de " -#~ "notre réseau sont basées sur `Deep " -#~ "Learning with PyTorch " -#~ "`_" -#~ " de PyTorch." 
#~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and PyTorch related " -#~ "packages:" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key ``num_gpus`` in ``client_resources``." +#~ " This being said, Ray (used " +#~ "internally by the VCE) is by " +#~ "default:" #~ msgstr "" -#~ "Dans un fichier appelé :code:`client.py`, " -#~ "importe Flower et les paquets liés " -#~ "à PyTorch :" -#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgid "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set ``num_gpus=0.5`` and " +#~ "you have two GPUs in your system" +#~ " with different (e.g. 32GB and 8GB)" +#~ " VRAM amounts, they both would run" +#~ " 2 clients concurrently." #~ msgstr "" -#~ "En outre, nous définissons l'attribution " -#~ "des appareils dans PyTorch avec :" #~ msgid "" -#~ "We use PyTorch to load CIFAR10, a" -#~ " popular colored image classification " -#~ "dataset for machine learning. The " -#~ "PyTorch :code:`DataLoader()` downloads the " -#~ "training and test data that are " -#~ "then normalized." +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ "``CUDA_VISIBLE_DEVICES=\"\"`` when launching " +#~ "your experiment." #~ msgstr "" -#~ "Nous utilisons PyTorch pour charger " -#~ "CIFAR10, un ensemble de données de " -#~ "classification d'images colorées populaire " -#~ "pour l'apprentissage automatique. Le " -#~ ":code:`DataLoader()` de PyTorch télécharge les" -#~ " données d'entraînement et de test " -#~ "qui sont ensuite normalisées." #~ msgid "" -#~ "Define the loss and optimizer with " -#~ "PyTorch. The training of the dataset " -#~ "is done by looping over the " -#~ "dataset, measure the corresponding loss " -#~ "and optimize it." 
+#~ "In addition, the GPU resource limits " +#~ "passed to ``client_resources`` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." #~ msgstr "" -#~ "Définis la perte et l'optimiseur avec" -#~ " PyTorch L'entraînement de l'ensemble de" -#~ " données se fait en bouclant sur " -#~ "l'ensemble de données, en mesurant la" -#~ " perte correspondante et en l'optimisant." #~ msgid "" -#~ "Define then the validation of the " -#~ "machine learning network. We loop over" -#~ " the test set and measure the " -#~ "loss and accuracy of the test set." +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of ``actor_kwargs`` we can pass" +#~ " the reserved key `\"on_actor_init_fn\"` in" +#~ " order to specify a function to " +#~ "be executed upon actor initialization. " +#~ "In this case, to enable GPU growth" +#~ " for TF workloads. It would look " +#~ "as follows:" #~ msgstr "" -#~ "Définis ensuite la validation du réseau" -#~ " d'apprentissage automatique. Nous passons " -#~ "en boucle sur l'ensemble de test " -#~ "et mesurons la perte et la " -#~ "précision de l'ensemble de test." #~ msgid "" -#~ "After defining the training and testing" -#~ " of a PyTorch machine learning model," -#~ " we use the functions for the " -#~ "Flower clients." +#~ "For ensuring data instance-level privacy" +#~ " during local model training on the" +#~ " client side, consider leveraging privacy" +#~ " engines such as Opacus and " +#~ "TensorFlow Privacy. For examples of " +#~ "using Flower with these engines, please" +#~ " refer to the Flower examples " +#~ "directory (`Opacus " +#~ "`_, " +#~ "`Tensorflow Privacy " +#~ "`_)." 
#~ msgstr "" -#~ "Après avoir défini l'entraînement et le" -#~ " test d'un modèle d'apprentissage " -#~ "automatique PyTorch, nous utilisons les " -#~ "fonctions pour les clients Flower." #~ msgid "" -#~ "The Flower clients will use a " -#~ "simple CNN adapted from 'PyTorch: A " -#~ "60 Minute Blitz':" +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ "``start_server`` function. It is usually " +#~ "recommended to adjust a few parameters" +#~ " during instantiation:" #~ msgstr "" -#~ "Les clients de Flower utiliseront un " -#~ "CNN simple adapté de \"PyTorch : A" -#~ " 60 Minute Blitz\" :" +#~ "Cela crée une stratégie dont tous " +#~ "les paramètres sont laissés à leur " +#~ "valeur par défaut et la transmet à" +#~ " la fonction :code:`start_server`. Il est" +#~ " généralement recommandé d'ajuster quelques " +#~ "paramètres lors de l'instanciation :" -#~ msgid "" -#~ "After loading the data set with " -#~ ":code:`load_data()` we define the Flower " -#~ "interface." +#~ msgid "flwr is the Flower command line interface." +#~ msgstr "" + +#~ msgid "Options" +#~ msgstr "Solution" + +#~ msgid "Install completion for the current shell." #~ msgstr "" -#~ "Après avoir chargé l'ensemble des " -#~ "données avec :code:`load_data()`, nous " -#~ "définissons l'interface Flower." #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses " -#~ "PyTorch. Implementing :code:`NumPyClient` usually" -#~ " means defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "Show completion for the current shell," +#~ " to copy it or customize the " +#~ "installation." 
#~ msgstr "" -#~ "Flower fournit une classe de commodité" -#~ " appelée :code:`NumPyClient` qui facilite " -#~ "la mise en œuvre de l'interface " -#~ ":code:`Client` lorsque ta charge de " -#~ "travail utilise PyTorch. Mettre en œuvre" -#~ " :code:`NumPyClient` signifie généralement " -#~ "définir les méthodes suivantes " -#~ "(:code:`set_parameters` est cependant facultatif)" -#~ " :" -#~ msgid "which can be implemented in the following way:" -#~ msgstr "qui peut être mis en œuvre de la manière suivante :" +#~ msgid "Build a Flower App into a Flower App Bundle (FAB)." +#~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-" -#~ "pytorch`." +#~ "You can run ``flwr build`` without " +#~ "any arguments to bundle the app " +#~ "located in the current directory. " +#~ "Alternatively, you can you can specify" +#~ " a path using the ``--app`` option" +#~ " to bundle an app located at " +#~ "the provided path. For example:" #~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré. Le" -#~ " code source complet " -#~ "`_ de cet exemple se " -#~ "trouve dans :code:`examples/quickstart-pytorch`." -#~ msgid "" -#~ "In this example, we split the " -#~ "dataset into two partitions with uniform" -#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " -#~ "Then, we load the partition for " -#~ "the given client based on " -#~ ":code:`node_id`:" +#~ msgid "``flwr build --app ./apps/flower-hello-world``." #~ msgstr "" -#~ msgid "" -#~ "The :code:`self.bst` is used to keep " -#~ "the Booster objects that remain " -#~ "consistent across rounds, allowing them " -#~ "to store predictions from trees " -#~ "integrated in earlier rounds and " -#~ "maintain other essential data structures " -#~ "for training." 
+#~ msgid "Path of the Flower App to bundle into a FAB" #~ msgstr "" -#~ msgid "" -#~ "In :code:`fit`, at the first round, " -#~ "we call :code:`xgb.train()` to build up" -#~ " the first set of trees. the " -#~ "returned Booster object and config are" -#~ " stored in :code:`self.bst` and " -#~ ":code:`self.config`, respectively. From the " -#~ "second round, we load the global " -#~ "model sent from server to " -#~ ":code:`self.bst`, and then update model " -#~ "weights on local training data with " -#~ "function :code:`local_boost` as follows:" +#~ msgid "Install a Flower App Bundle." +#~ msgstr "Installer Flower" + +#~ msgid "It can be ran with a single FAB file argument:" #~ msgstr "" -#~ msgid "" -#~ "Given :code:`num_local_round`, we update trees" -#~ " by calling :code:`self.bst.update` method. " -#~ "After training, the last " -#~ ":code:`N=num_local_round` trees will be " -#~ "extracted to send to the server." +#~ msgid "``flwr install ./target_project.fab``" #~ msgstr "" -#~ msgid "" -#~ "In :code:`evaluate`, we call " -#~ ":code:`self.bst.eval_set` function to conduct " -#~ "evaluation on valid set. The AUC " -#~ "value will be returned." +#~ msgid "The target install directory can be specified with ``--flwr-dir``:" #~ msgstr "" -#~ msgid "" -#~ "We use two clients for this " -#~ "example. An :code:`evaluate_metrics_aggregation` " -#~ "function is defined to collect and " -#~ "wighted average the AUC values from " -#~ "clients." +#~ msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" #~ msgstr "" #~ msgid "" -#~ "Let's now create the Federated Dataset" -#~ " abstraction that from ``flwr-datasets``" -#~ " that partitions the CIFAR-10. We " -#~ "will create small training and test " -#~ "set for each edge device and wrap" -#~ " each of them into a PyTorch " -#~ "``DataLoader``:" +#~ "This will install ``target_project`` to " +#~ "``./docs/flwr/``. 
By default, ``flwr-dir`` " +#~ "is equal to:" #~ msgstr "" -#~ msgid "Implementing a Flower client" -#~ msgstr "Mise en place d'un client Flower" +#~ msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +#~ msgstr "" -#~ msgid "" -#~ "To implement the Flower client, we " -#~ "create a subclass of " -#~ "``flwr.client.NumPyClient`` and implement the " -#~ "three methods ``get_parameters``, ``fit``, and" -#~ " ``evaluate``:" +#~ msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" #~ msgstr "" -#~ "Pour mettre en œuvre le client " -#~ "Flower, nous créons une sous-classe " -#~ "de ``flwr.client.NumPyClient`` et mettons en" -#~ " œuvre les trois méthodes " -#~ "``get_parameters``, ``fit`` et ``evaluate`` :" -#~ msgid "" -#~ "The function ``start_simulation`` accepts a" -#~ " number of arguments, amongst them " -#~ "the ``client_fn`` used to create " -#~ "``FlowerClient`` instances, the number of " -#~ "clients to simulate (``num_clients``), the " -#~ "number of federated learning rounds " -#~ "(``num_rounds``), and the strategy. The " -#~ "strategy encapsulates the federated learning" -#~ " approach/algorithm, for example, *Federated " -#~ "Averaging* (FedAvg)." +#~ msgid "``$HOME/.flwr/`` in all other cases" #~ msgstr "" -#~ "La fonction ``start_simulation`` accepte un" -#~ " certain nombre d'arguments, parmi lesquels" -#~ " le ``client_fn`` utilisé pour créer " -#~ "les instances ``FlowerClient``, le nombre " -#~ "de clients à simuler (``num_clients``), " -#~ "le nombre de tours d'apprentissage " -#~ "fédéré (``num_rounds``), et la stratégie. " -#~ "La stratégie encapsule l'approche/algorithme " -#~ "d'apprentissage fédéré, par exemple, " -#~ "*Federated Averaging* (FedAvg)." + +#~ msgid "The desired install path." +#~ msgstr "" + +#~ msgid "Optional argument" +#~ msgstr "Améliorations facultatives" + +#~ msgid "The source FAB file to install." +#~ msgstr "" + +#~ msgid "Get logs from a Flower project run." 
+#~ msgstr "" + +#~ msgid "Flag to stream or print logs from the Flower run" +#~ msgstr "" + +#~ msgid "default" +#~ msgstr "Flux de travail" + +#~ msgid "``True``" +#~ msgstr "" + +#~ msgid "Required argument" +#~ msgstr "Amélioration de la documentation" + +#~ msgid "The Flower run ID to query" +#~ msgstr "Rejoignez la communauté de Flower" + +#~ msgid "Path of the Flower project to run" +#~ msgstr "" + +#~ msgid "Name of the federation to run the app on" +#~ msgstr "" + +#~ msgid "Create new Flower App." +#~ msgstr "Serveur de Flower" + +#~ msgid "The ML framework to use" +#~ msgstr "" + +#~ msgid "options" +#~ msgstr "Solution" #~ msgid "" -#~ "The only thing left to do is " -#~ "to tell the strategy to call this" -#~ " function whenever it receives evaluation" -#~ " metric dictionaries from the clients:" +#~ "PyTorch | TensorFlow | sklearn | " +#~ "HuggingFace | JAX | MLX | NumPy" +#~ " | FlowerTune | Flower Baseline" #~ msgstr "" -#~ "La seule chose qui reste à faire" -#~ " est d'indiquer à la stratégie " -#~ "d'appeler cette fonction chaque fois " -#~ "qu'elle reçoit des dictionnaires de " -#~ "métriques d'évaluation de la part des" -#~ " clients :" -#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgid "The Flower username of the author" #~ msgstr "" -#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgid "The name of the Flower App" +#~ msgstr "Chargement des données" + +#~ msgid "Run Flower App." +#~ msgstr "Serveur de Flower" + +#~ msgid "Override configuration key-value pairs, should be of the format:" #~ msgstr "" -#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgid "" +#~ "`--run-config 'key1=\"value1\" key2=\"value2\"' " +#~ "--run-config 'key3=\"value3\"'`" #~ msgstr "" -#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgid "" +#~ "Note that `key1`, `key2`, and `key3` " +#~ "in this example need to exist " +#~ "inside the `pyproject.toml` in order to" +#~ " be properly overriden." 
#~ msgstr "" -#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgid "" +#~ "Use `--stream` with `flwr run` to " +#~ "display logs; logs are not streamed " +#~ "by default." #~ msgstr "" -#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgid "``False``" +#~ msgstr ":code:`évaluer`" + +#~ msgid "Path of the Flower App to run." +#~ msgstr "Chargement des données" + +#~ msgid "Name of the federation to run the app on." #~ msgstr "" -#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgid "" +#~ "Note that since version ``1.11.0``, " +#~ "``flower-server-app`` no longer supports" +#~ " passing a reference to a `ServerApp`" +#~ " attribute. Instead, you need to pass" +#~ " the path to Flower app via the" +#~ " argument ``--app``. This is the path" +#~ " to a directory containing a " +#~ "`pyproject.toml`. You can create a valid" +#~ " Flower app by executing ``flwr new``" +#~ " and following the prompt." #~ msgstr "" -#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgid "" +#~ ":py:obj:`Context `\\ \\(node\\_id\\," +#~ " node\\_config\\, state\\, run\\_config\\)" #~ msgstr "" -#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgid "" +#~ "Holds records added by the entity " +#~ "in a given run and that will " +#~ "stay local. This means that the " +#~ "data it holds will never leave the" +#~ " system it's running from. This can" +#~ " be used as an intermediate storage" +#~ " or scratchpad when executing mods. " +#~ "It can also be used as a " +#~ "memory to access at different points " +#~ "during the lifecycle of this entity " +#~ "(e.g. across multiple rounds)" #~ msgstr "" -#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgid "" +#~ "A config (key/value mapping) held by " +#~ "the entity in a given run and " +#~ "that will stay local. It can be" +#~ " used at any point during the " +#~ "lifecycle of this entity (e.g. 
across" +#~ " multiple rounds)" #~ msgstr "" -#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgid "" +#~ ":py:obj:`RUN_SUPEREXEC_ENTER " +#~ "`\\" +#~ msgstr "serveur.stratégie.Stratégie" + +#~ msgid "" +#~ ":py:obj:`RUN_SUPEREXEC_LEAVE " +#~ "`\\" +#~ msgstr "serveur.stratégie.Stratégie" + +#~ msgid "Abstract base Driver class for the Driver API." #~ msgstr "" -#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgid "Log error stating that module `ray` could not be imported." #~ msgstr "" -#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing JAX workload. " +#~ "We are using JAX to train a " +#~ "linear regression model on a scikit-" +#~ "learn dataset. We will structure the " +#~ "example similar to our `PyTorch - " +#~ "From Centralized To Federated " +#~ "`_ walkthrough. " +#~ "First, we build a centralized training" +#~ " approach based on the `Linear " +#~ "Regression with JAX " +#~ "`_" +#~ " tutorial`. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" +#~ "Ce tutoriel te montrera comment utiliser" +#~ " Flower pour construire une version " +#~ "fédérée d'une charge de travail JAX " +#~ "existante. Nous utilisons JAX pour " +#~ "entraîner un modèle de régression " +#~ "linéaire sur un ensemble de données " +#~ "scikit-learn. Nous structurerons l'exemple " +#~ "de la même manière que notre " +#~ "présentation `PyTorch - De la " +#~ "centralisation à la fédération " +#~ "`_. Tout " +#~ "d'abord, nous construisons une approche " +#~ "d'entraînement centralisée basée sur le " +#~ "tutoriel `Régression linéaire avec JAX " +#~ "`_." +#~ " Ensuite, nous nous appuyons sur le" +#~ " code d'entraînement centralisé pour " +#~ "exécuter l'entraînement de manière fédérée." 
-#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "" +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " ``jax``, ``jaxlib``, ``scikit-learn``, and" +#~ " ``flwr``:" #~ msgstr "" +#~ "Avant de commencer à construire notre" +#~ " exemple JAX, nous devons installer " +#~ "les paquets :code:`jax`, :code:`jaxlib`, :code" +#~ ":`scikit-learn`, et :code:`flwr` :" -#~ msgid "run\\_client\\_app" +#~ msgid "Linear Regression with JAX" +#~ msgstr "Régression linéaire avec JAX" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a ``Linear Regression`` model. If" +#~ " you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." +#~ msgstr "" +#~ "Nous commençons par une brève " +#~ "description du code d'entraînement centralisé" +#~ " basé sur un modèle :code:`Régression " +#~ "linéaire`. Si tu veux une explication" +#~ " plus approfondie de ce qui se " +#~ "passe, jette un coup d'œil à la" +#~ " documentation officielle `JAX " +#~ "`_." + +#~ msgid "" +#~ "Let's create a new file called " +#~ "``jax_training.py`` with all the components" +#~ " required for a traditional (centralized)" +#~ " linear regression training. First, the " +#~ "JAX packages ``jax`` and ``jaxlib`` need" +#~ " to be imported. In addition, we " +#~ "need to import ``sklearn`` since we " +#~ "use ``make_regression`` for the dataset " +#~ "and ``train_test_split`` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the ``flwr`` package " +#~ "for federated learning. This will be " +#~ "done later." +#~ msgstr "" +#~ "Créons un nouveau fichier appelé " +#~ ":code:`jax_training.py` avec tous les " +#~ "composants nécessaires pour un apprentissage" +#~ " traditionnel (centralisé) de la régression" +#~ " linéaire. 
Tout d'abord, les paquets " +#~ "JAX :code:`jax` et :code:`jaxlib` doivent " +#~ "être importés. En outre, nous devons " +#~ "importer :code:`sklearn` puisque nous " +#~ "utilisons :code:`make_regression` pour le jeu" +#~ " de données et :code:`train_test_split` " +#~ "pour diviser le jeu de données en" +#~ " un jeu d'entraînement et un jeu " +#~ "de test. Tu peux voir que nous " +#~ "n'avons pas encore importé le paquet " +#~ ":code:`flwr` pour l'apprentissage fédéré, ce" +#~ " qui sera fait plus tard." + +#~ msgid "" +#~ "The ``load_data()`` function loads the " +#~ "mentioned training and test sets." #~ msgstr "" +#~ "La fonction :code:`load_data()` charge les " +#~ "ensembles d'entraînement et de test " +#~ "mentionnés." -#~ msgid "run\\_supernode" -#~ msgstr "flower-superlink" +#~ msgid "" +#~ "The model architecture (a very simple" +#~ " ``Linear Regression`` model) is defined" +#~ " in ``load_model()``." +#~ msgstr "" +#~ "L'architecture du modèle (un modèle " +#~ ":code:`Régression linéaire` très simple) est" +#~ " définie dans :code:`load_model()`." -#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgid "" +#~ "We now need to define the training" +#~ " (function ``train()``), which loops over" +#~ " the training set and measures the" +#~ " loss (function ``loss_fn()``) for each " +#~ "batch of training examples. The loss " +#~ "function is separate since JAX takes " +#~ "derivatives with a ``grad()`` function " +#~ "(defined in the ``main()`` function and" +#~ " called in ``train()``)." +#~ msgstr "" +#~ "Nous devons maintenant définir l'entraînement" +#~ " (fonction :code:`train()`), qui boucle sur" +#~ " l'ensemble d'entraînement et mesure la " +#~ "perte (fonction :code:`loss_fn()`) pour chaque" +#~ " lot d'exemples d'entraînement. 
La fonction" +#~ " de perte est séparée puisque JAX " +#~ "prend des dérivés avec une fonction " +#~ ":code:`grad()` (définie dans la fonction " +#~ ":code:`main()` et appelée dans " +#~ ":code:`train()`)." + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function ``evaluation()``. " +#~ "The function takes all test examples " +#~ "and measures the loss of the " +#~ "linear regression model." #~ msgstr "" +#~ "L'évaluation du modèle est définie dans" +#~ " la fonction :code:`evaluation()`. La " +#~ "fonction prend tous les exemples de " +#~ "test et mesure la perte du modèle" +#~ " de régression linéaire." #~ msgid "" -#~ "When there isn't an exact match, " -#~ "all the existing keys in the " -#~ "layout map will be treated as a" -#~ " regex and map against the input " -#~ "key again. The first match will be" -#~ " returned, based on the key insertion" -#~ " order. Return None if there isn't" -#~ " any match found." +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the ``jax.grad()`` function" +#~ " is defined in ``main()`` and passed" +#~ " to ``train()``." #~ msgstr "" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, " +#~ "l'entraînement et l'évaluation, nous pouvons" +#~ " tout assembler et entraîner notre " +#~ "modèle à l'aide de JAX. Comme nous" +#~ " l'avons déjà mentionné, la fonction " +#~ ":code:`jax.grad()` est définie dans " +#~ ":code:`main()` et transmise à :code:`train()`." -#~ msgid "the string key as the query for the layout." +#~ msgid "You can now run your (centralized) JAX linear regression workload:" #~ msgstr "" +#~ "Tu peux maintenant exécuter ta charge" +#~ " de travail (centralisée) de régression " +#~ "linéaire JAX :" -#~ msgid "Corresponding layout based on the query." 
+#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used JAX before." +#~ " Let's take the next step and " +#~ "use what we've built to create a" +#~ " simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." #~ msgstr "" +#~ "Jusqu'à présent, tout cela devrait te" +#~ " sembler assez familier si tu as " +#~ "déjà utilisé JAX. Passons à l'étape " +#~ "suivante et utilisons ce que nous " +#~ "avons construit pour créer un simple " +#~ "système d'apprentissage fédéré composé d'un" +#~ " serveur et de deux clients." -#~ msgid "run\\_server\\_app" +#~ msgid "JAX meets Flower" +#~ msgstr "JAX rencontre Flower" + +#~ msgid "" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in ``jax_training.py`` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." +#~ msgstr "" +#~ "Le concept de fédération d'une charge" +#~ " de travail existante est toujours le" +#~ " même et facile à comprendre. 
Nous" +#~ " devons démarrer un *serveur*, puis " +#~ "utiliser le code dans :code:`jax_training.py`" +#~ " pour les *clients* qui sont " +#~ "connectés au *serveur*.Le *serveur* envoie " +#~ "les paramètres du modèle aux clients.Les" +#~ " *clients* exécutent la formation et " +#~ "mettent à jour les paramètres.Les " +#~ "paramètres mis à jour sont renvoyés " +#~ "au *serveur*, qui fait la moyenne " +#~ "de toutes les mises à jour de " +#~ "paramètres reçues.Ceci décrit un tour du" +#~ " processus d'apprentissage fédéré, et nous" +#~ " répétons cette opération pour plusieurs" +#~ " tours." + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined JAX training " +#~ "in ``jax_training.py``. Our *client* needs " +#~ "to import ``flwr``, but also ``jax`` " +#~ "and ``jaxlib`` to update the parameters" +#~ " on our JAX model:" #~ msgstr "" +#~ "Enfin, nous allons définir la logique" +#~ " de notre *client* dans :code:`client.py`" +#~ " et nous appuyer sur la formation " +#~ "JAX définie précédemment dans " +#~ ":code:`jax_training.py`. Notre *client* doit " +#~ "importer :code:`flwr`, mais aussi :code:`jax`" +#~ " et :code:`jaxlib` pour mettre à jour" +#~ " les paramètres de notre modèle JAX" +#~ " :" -#~ msgid "run\\_superlink" -#~ msgstr "flower-superlink" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``FlowerClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. 
``FlowerClient`` needs to implement" +#~ " four methods, two methods for " +#~ "getting/setting model parameters, one method" +#~ " for training the model, and one " +#~ "method for testing the model:" +#~ msgstr "" +#~ "L'implémentation d'un *client* Flower signifie" +#~ " essentiellement l'implémentation d'une sous-" +#~ "classe de :code:`flwr.client.Client` ou " +#~ ":code:`flwr.client.NumPyClient`. Notre implémentation " +#~ "sera basée sur :code:`flwr.client.NumPyClient` " +#~ "et nous l'appellerons :code:`FlowerClient`. " +#~ ":code:`NumPyClient` est légèrement plus facile" +#~ " à implémenter que :code:`Client` si " +#~ "vous utilisez un framework avec une " +#~ "bonne interopérabilité NumPy (comme JAX) " +#~ "parce qu'il évite une partie du " +#~ "boilerplate qui serait autrement nécessaire." +#~ " :code:`FlowerClient` doit implémenter quatre " +#~ "méthodes, deux méthodes pour obtenir/régler" +#~ " les paramètres du modèle, une " +#~ "méthode pour former le modèle, et " +#~ "une méthode pour tester le modèle " +#~ ":" + +#~ msgid "``set_parameters (optional)``" +#~ msgstr ":code:`set_parameters (optional)`" + +#~ msgid "transform parameters to NumPy ``ndarray``'s" +#~ msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" + +#~ msgid "get the updated local model parameters and return them to the server" +#~ msgstr "" +#~ "récupère les paramètres du modèle local" +#~ " mis à jour et les renvoie au" +#~ " serveur" + +#~ msgid "return the local loss to the server" +#~ msgstr "renvoie la perte locale au serveur" #~ msgid "" -#~ ":py:obj:`start_simulation `\\" -#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ "``DeviceArray`` to ``NumPy ndarray`` to " +#~ "make them compatible with `NumPyClient`." 
+#~ msgstr "" +#~ "La partie la plus difficile consiste " +#~ "à transformer les paramètres du modèle" +#~ " JAX de :code:`DeviceArray` en :code:`NumPy" +#~ " ndarray` pour les rendre compatibles " +#~ "avec `NumPyClient`." + +#~ msgid "" +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``evaluate()`` " +#~ "previously defined in ``jax_training.py``. So" +#~ " what we really do here is we" +#~ " tell Flower through our ``NumPyClient``" +#~ " subclass which of our already " +#~ "defined functions to call for training" +#~ " and evaluation. We included type " +#~ "annotations to give you a better " +#~ "understanding of the data types that " +#~ "get passed around." #~ msgstr "" +#~ "Les deux méthodes :code:`NumPyClient` " +#~ ":code:`fit` et :code:`evaluate` utilisent les" +#~ " fonctions :code:`train()` et :code:`evaluate()`" +#~ " définies précédemment dans " +#~ ":code:`jax_training.py`. Ce que nous faisons" +#~ " vraiment ici, c'est que nous " +#~ "indiquons à Flower, par le biais " +#~ "de notre sous-classe :code:`NumPyClient`, " +#~ "laquelle de nos fonctions déjà définies" +#~ " doit être appelée pour l'entraînement " +#~ "et l'évaluation. Nous avons inclus des" +#~ " annotations de type pour te donner" +#~ " une meilleure compréhension des types " +#~ "de données qui sont transmis." + +#~ msgid "Having defined the federation process, we can run it." +#~ msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." #~ msgid "" -#~ "A function creating `Client` instances. " -#~ "The function must have the signature " -#~ "`client_fn(context: Context). It should return" -#~ " a single client instance of type " -#~ "`Client`. Note that the created client" -#~ " instances are ephemeral and will " -#~ "often be destroyed after a single " -#~ "method invocation. 
Since client instances " -#~ "are not long-lived, they should " -#~ "not attempt to carry state over " -#~ "method invocations. Any state required " -#~ "by the instance (model, dataset, " -#~ "hyperparameters, ...) should be (re-)created" -#~ " in either the call to `client_fn`" -#~ " or the call to any of the " -#~ "client methods (e.g., load evaluation " -#~ "data in the `evaluate` method itself)." +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your JAX " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" #~ msgstr "" +#~ "dans chaque fenêtre (assure-toi que " +#~ "le serveur est toujours en cours " +#~ "d'exécution avant de le faire) et " +#~ "tu verras que ton projet JAX " +#~ "exécute l'apprentissage fédéré sur deux " +#~ "clients. Félicitations !" -#~ msgid "The total number of clients in this simulation." +#~ msgid "" +#~ "The source code of this example " +#~ "was improved over time and can be" +#~ " found here: `Quickstart JAX " +#~ "`_. Our example is somewhat over-" +#~ "simplified because both clients load the" +#~ " same dataset." #~ msgstr "" +#~ "Le code source de cet exemple a" +#~ " été amélioré au fil du temps " +#~ "et peut être trouvé ici : " +#~ "`Quickstart JAX " +#~ "`_. Notre exemple est quelque peu" +#~ " simplifié à l'extrême car les deux" +#~ " clients chargent le même jeu de " +#~ "données." #~ msgid "" -#~ "UNSUPPORTED, WILL BE REMOVED. USE " -#~ "`num_clients` INSTEAD. List `client_id`s for" -#~ " each client. This is only required" -#~ " if `num_clients` is not set. Setting" -#~ " both `num_clients` and `clients_ids` with" -#~ " `len(clients_ids)` not equal to " -#~ "`num_clients` generates an error. Using " -#~ "this argument will raise an error." +#~ "You're now prepared to explore this " +#~ "topic further. How about using a " +#~ "more sophisticated model or using a " +#~ "different dataset? How about adding more" +#~ " clients?" 
#~ msgstr "" +#~ "Tu es maintenant prêt à approfondir " +#~ "ce sujet. Pourquoi ne pas utiliser " +#~ "un modèle plus sophistiqué ou un " +#~ "ensemble de données différent ? Pourquoi" +#~ " ne pas ajouter d'autres clients ?" #~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. To understand the GPU" -#~ " utilization caused by `num_gpus`, as " -#~ "well as using custom resources, please" -#~ " consult the Ray documentation." +#~ "In this tutorial, we will learn " +#~ "how to train a ``Logistic Regression``" +#~ " model on MNIST using Flower and " +#~ "scikit-learn." #~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un :code:`modèle de " +#~ "régression logistique` sur MNIST en " +#~ "utilisant Flower et scikit-learn." #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "executing a ClientApp wrapping input " -#~ "argument `client_fn`." +#~ "Our example consists of one *server* " +#~ "and two *clients* all having the " +#~ "same model." #~ msgstr "" +#~ "Notre exemple consiste en un *serveur*" +#~ " et deux *clients* ayant tous le " +#~ "même modèle." #~ msgid "" -#~ "If you want to create your own " -#~ "Actor classes, you might need to " -#~ "pass some input argument. You can " -#~ "use this dictionary for such purpose." +#~ "*Clients* are responsible for generating " +#~ "individual model parameter updates for " +#~ "the model based on their local " +#~ "datasets. These updates are then sent" +#~ " to the *server* which will aggregate" +#~ " them to produce an updated global" +#~ " model. Finally, the *server* sends " +#~ "this improved version of the model " +#~ "back to each *client*. A complete " +#~ "cycle of parameters updates is called" +#~ " a *round*." 
#~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour individuelles des" +#~ " paramètres du modèle en fonction de" +#~ " leurs ensembles de données locales. " +#~ "Ces mises à jour sont ensuite " +#~ "envoyées au *serveur* qui les agrège " +#~ "pour produire un modèle global mis " +#~ "à jour. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour des paramètres s'appelle" +#~ " un *round*." #~ msgid "" -#~ "(default: \"DEFAULT\") Optional string " -#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" -#~ " to choose in which node the " -#~ "actor is placed. If you are an " -#~ "advanced user needed more control you" -#~ " can use lower-level scheduling " -#~ "strategies to pin actors to specific " -#~ "compute nodes (e.g. via " -#~ "NodeAffinitySchedulingStrategy). Please note this" -#~ " is an advanced feature. For all " -#~ "details, please refer to the Ray " -#~ "documentation: https://docs.ray.io/en/latest/ray-" -#~ "core/scheduling/index.html" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by " +#~ "running:" #~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "approximative de ce qui se passe, " +#~ "commençons. Nous devons d'abord installer " +#~ "Flower. Tu peux le faire en " +#~ "lançant :" + +#~ msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#~ msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#~ msgid "Or simply install all dependencies using Poetry:" +#~ msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with FastAI to train a vision " -#~ "model on CIFAR-10." 
+#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within ``utils.py``. The " +#~ "``utils.py`` contains different functions " +#~ "defining all the machine learning " +#~ "basics:" #~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, exécutons une formation " +#~ "distribuée simple avec deux clients et" +#~ " un serveur. Cependant, avant de " +#~ "configurer le client et le serveur, " +#~ "nous allons définir toutes les " +#~ "fonctionnalités dont nous avons besoin " +#~ "pour notre configuration d'apprentissage " +#~ "fédéré dans :code:`utils.py`. Le " +#~ ":code:`utils.py` contient différentes fonctions " +#~ "définissant toutes les bases de " +#~ "l'apprentissage automatique :" -#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgid "``get_model_parameters()``" +#~ msgstr ":code:`get_model_parameters()`" + +#~ msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant fastai et Flower !" +#~ "Renvoie les paramètres d'un modèle de" +#~ " régression logistique :code:`sklearn`" + +#~ msgid "``set_model_params()``" +#~ msgstr ":code:`set_model_params()`" + +#~ msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +#~ msgstr "" +#~ "Définit les paramètres d'un modèle de" +#~ " régression logistique :code:`sklean`" + +#~ msgid "``set_initial_params()``" +#~ msgstr ":code:`set_initial_params()`" + +#~ msgid "Initializes the model parameters that the Flower server will ask for" +#~ msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn more." 
+#~ "Please check out ``utils.py`` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the ``client.py`` and imported. The" +#~ " ``client.py`` also requires to import " +#~ "several packages such as Flower and " +#~ "scikit-learn:" #~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet `_ pour en savoir plus." +#~ "Tu peux consulter :code:`utils.py` `ici " +#~ "`_ pour plus de " +#~ "détails. Les fonctions prédéfinies sont " +#~ "utilisées dans :code:`client.py` et importées." +#~ " :code:`client.py` nécessite également d'importer" +#~ " plusieurs paquets tels que Flower et" +#~ " scikit-learn :" #~ msgid "" -#~ "Check out this Federating Learning " -#~ "quickstart tutorial for using Flower " -#~ "with HuggingFace Transformers in order " -#~ "to fine-tune an LLM." +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The ``FederatedDataset.load_partition()`` method " +#~ "loads the partitioned training set for" +#~ " each partition ID defined in the " +#~ "``--partition-id`` argument." #~ msgstr "" #~ msgid "" -#~ "Let's build a federated learning system" -#~ " using Hugging Face Transformers and " -#~ "Flower!" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ "``utils.set_initial_params()``." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " à l'aide des transformateurs Hugging " -#~ "Face et de Flower !" +#~ "Ensuite, le modèle de régression " +#~ "logistique est défini et initialisé avec" +#~ " :code:`utils.set_initial_params()`." -#~ msgid "Dependencies" -#~ msgstr "Dépendances" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called ``Client``." 
+#~ " When the server selects a particular" +#~ " client for training, it sends " +#~ "training instructions over the network. " +#~ "The client receives those instructions " +#~ "and calls one of the ``Client`` " +#~ "methods to run your code (i.e., to" +#~ " fit the logistic regression we " +#~ "defined earlier)." +#~ msgstr "" +#~ "Le serveur Flower interagit avec les " +#~ "clients par le biais d'une interface " +#~ "appelée :code:`Client`. Lorsque le serveur " +#~ "sélectionne un client particulier pour " +#~ "la formation, il envoie des instructions" +#~ " de formation sur le réseau. Le " +#~ "client reçoit ces instructions et " +#~ "appelle l'une des méthodes :code:`Client` " +#~ "pour exécuter ton code (c'est-à-dire " +#~ "pour ajuster la régression logistique " +#~ "que nous avons définie plus tôt)." #~ msgid "" -#~ "To follow along this tutorial you " -#~ "will need to install the following " -#~ "packages: :code:`datasets`, :code:`evaluate`, " -#~ ":code:`flwr`, :code:`torch`, and " -#~ ":code:`transformers`. This can be done " -#~ "using :code:`pip`:" +#~ "Flower provides a convenience class " +#~ "called ``NumPyClient`` which makes it " +#~ "easier to implement the ``Client`` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing ``NumPyClient`` " +#~ "usually means defining the following " +#~ "methods (``set_parameters`` is optional " +#~ "though):" #~ msgstr "" -#~ "Pour suivre ce tutoriel, tu devras " -#~ "installer les paquets suivants : " -#~ ":code:`datasets`, :code:`evaluate`, :code:`flwr`, " -#~ ":code:`torch`, et :code:`transformers`. Cela " -#~ "peut être fait en utilisant :code:`pip`" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise scikit-learn. 
Mettre en" +#~ " œuvre :code:`NumPyClient` signifie généralement" +#~ " définir les méthodes suivantes " +#~ "(:code:`set_parameters` est cependant facultatif)" #~ " :" -#~ msgid "Standard Hugging Face workflow" -#~ msgstr "Flux de travail standard pour le visage" +#~ msgid "return the model weight as a list of NumPy ndarrays" +#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" -#~ msgid "Handling the data" -#~ msgstr "Traitement des données" +#~ msgid "``set_parameters`` (optional)" +#~ msgstr ":code:`set_parameters` (optionnel)" #~ msgid "" -#~ "To fetch the IMDB dataset, we will" -#~ " use Hugging Face's :code:`datasets` " -#~ "library. We then need to tokenize " -#~ "the data and create :code:`PyTorch` " -#~ "dataloaders, this is all done in " -#~ "the :code:`load_data` function:" +#~ "update the local model weights with " +#~ "the parameters received from the server" #~ msgstr "" -#~ "Pour récupérer le jeu de données " -#~ "IMDB, nous utiliserons la bibliothèque " -#~ ":code:`datasets` de Hugging Face. 
Nous " -#~ "devons ensuite tokeniser les données et" -#~ " créer des :code:`PyTorch` dataloaders, ce" -#~ " qui est fait dans la fonction " -#~ ":code:`load_data` :" +#~ "mettre à jour les poids du modèle" +#~ " local avec les paramètres reçus du" +#~ " serveur" -#~ msgid "Training and testing the model" -#~ msgstr "Former et tester le modèle" +#~ msgid "is directly imported with ``utils.set_model_params()``" +#~ msgstr "est directement importé avec :code:`utils.set_model_params()`" + +#~ msgid "set the local model weights" +#~ msgstr "fixe les poids du modèle local" + +#~ msgid "train the local model" +#~ msgstr "entraîne le modèle local" + +#~ msgid "return the updated local model weights" +#~ msgstr "recevoir les poids du modèle local mis à jour" + +#~ msgid "test the local model" +#~ msgstr "teste le modèle local" + +#~ msgid "The methods can be implemented in the following way:" +#~ msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" #~ msgid "" -#~ "Once we have a way of creating " -#~ "our trainloader and testloader, we can" -#~ " take care of the training and " -#~ "testing. This is very similar to " -#~ "any :code:`PyTorch` training or testing " -#~ "loop:" +#~ "We can now create an instance of" +#~ " our class ``MnistClient`` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" -#~ "Une fois que nous avons trouvé un" -#~ " moyen de créer notre trainloader et" -#~ " notre testloader, nous pouvons nous " -#~ "occuper de l'entraînement et du test." 
-#~ " C'est très similaire à n'importe " -#~ "quelle boucle d'entraînement ou de test" -#~ " :code:`PyTorch` :" - -#~ msgid "Creating the model itself" -#~ msgstr "Créer le modèle lui-même" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`MnistClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" #~ msgid "" -#~ "To create the model itself, we " -#~ "will just load the pre-trained " -#~ "distillBERT model using Hugging Face’s " -#~ ":code:`AutoModelForSequenceClassification` :" +#~ "That's it for the client. We only" +#~ " have to implement ``Client`` or " +#~ "``NumPyClient`` and call " +#~ "``fl.client.start_client()``. If you implement " +#~ "a client of type ``NumPyClient`` you'll" +#~ " need to first call its " +#~ "``to_client()`` method. The string " +#~ "``\"0.0.0.0:8080\"`` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use ``\"0.0.0.0:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we pass to the client." #~ msgstr "" -#~ "Pour créer le modèle lui-même, " -#~ "nous allons simplement charger le modèle" -#~ " distillBERT pré-entraîné en utilisant le" -#~ " :code:`AutoModelForSequenceClassification` de Hugging" -#~ " Face :" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"0.0.0:8080\"` indique au client à " +#~ "quel serveur se connecter. Dans notre" +#~ " cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"0.0.0:8080\"`. 
Si nous exécutons une" +#~ " charge de travail véritablement fédérée" +#~ " avec le serveur et les clients " +#~ "s'exécutant sur des machines différentes, " +#~ "tout ce qui doit changer est " +#~ ":code:`server_address` que nous transmettons " +#~ "au client." -#~ msgid "Creating the IMDBClient" -#~ msgstr "Création du client IMDBC" +#~ msgid "" +#~ "The following Flower server is a " +#~ "little bit more advanced and returns " +#~ "an evaluation function for the " +#~ "server-side evaluation. First, we import" +#~ " again all required libraries such as" +#~ " Flower and scikit-learn." +#~ msgstr "" +#~ "Le serveur Flower suivant est un " +#~ "peu plus avancé et renvoie une " +#~ "fonction d'évaluation pour l'évaluation côté" +#~ " serveur. Tout d'abord, nous importons " +#~ "à nouveau toutes les bibliothèques " +#~ "requises telles que Flower et scikit-" +#~ "learn." + +#~ msgid "``server.py``, import Flower and start the server:" +#~ msgstr ":code:`server.py`, importe Flower et démarre le serveur :" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in ``fit_round()`` and the" +#~ " evaluation is defined in " +#~ "``get_evaluate_fn()``. The evaluation function " +#~ "is called after each federated learning" +#~ " round and gives you information " +#~ "about loss and accuracy. Note that " +#~ "we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" +#~ "Le nombre de tours d'apprentissage " +#~ "fédéré est défini dans :code:`fit_round()` " +#~ "et l'évaluation est définie dans " +#~ ":code:`get_evaluate_fn()`. La fonction d'évaluation" +#~ " est appelée après chaque tour " +#~ "d'apprentissage fédéré et te donne des" +#~ " informations sur la perte et la " +#~ "précision." 
+ +#~ msgid "" +#~ "The ``main`` contains the server-side" +#~ " parameter initialization " +#~ "``utils.set_initial_params()`` as well as the" +#~ " aggregation strategy ``fl.server.strategy:FedAvg()``." +#~ " The strategy is the default one, " +#~ "federated averaging (or FedAvg), with " +#~ "two clients and evaluation after each" +#~ " federated learning round. The server " +#~ "can be started with the command " +#~ "``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))``." +#~ msgstr "" +#~ "Le :code:`main` contient l'initialisation des" +#~ " paramètres côté serveur " +#~ ":code:`utils.set_initial_params()` ainsi que la " +#~ "stratégie d'agrégation " +#~ ":code:`fl.server.strategy:FedAvg()`. La stratégie " +#~ "est celle par défaut, la moyenne " +#~ "fédérée (ou FedAvg), avec deux clients" +#~ " et une évaluation après chaque tour" +#~ " d'apprentissage fédéré. Le serveur peut" +#~ " être démarré avec la commande " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." #~ msgid "" -#~ "To federate our example to multiple " -#~ "clients, we first need to write " -#~ "our Flower client class (inheriting from" -#~ " :code:`flwr.client.NumPyClient`). This is very" -#~ " easy, as our model is a " -#~ "standard :code:`PyTorch` model:" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We, " +#~ "therefore, have to start the server " +#~ "first:" #~ msgstr "" -#~ "Pour fédérer notre exemple à plusieurs" -#~ " clients, nous devons d'abord écrire " -#~ "notre classe de client Flower (héritant" -#~ " de :code:`flwr.client.NumPyClient`). 
C'est très" -#~ " facile, car notre modèle est un " -#~ "modèle :code:`PyTorch` standard :" +#~ "Le client et le serveur étant " +#~ "prêts, nous pouvons maintenant tout " +#~ "lancer et voir l'apprentissage fédéré en" +#~ " action. Les systèmes d'apprentissage " +#~ "fédéré ont généralement un serveur et" +#~ " plusieurs clients. Nous devons donc " +#~ "commencer par lancer le serveur :" #~ msgid "" -#~ "The :code:`get_parameters` function lets the" -#~ " server get the client's parameters. " -#~ "Inversely, the :code:`set_parameters` function " -#~ "allows the server to send its " -#~ "parameters to the client. Finally, the" -#~ " :code:`fit` function trains the model " -#~ "locally for the client, and the " -#~ ":code:`evaluate` function tests the model " -#~ "locally and returns the relevant " -#~ "metrics." +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Open a new terminal and " +#~ "start the first client:" #~ msgstr "" -#~ "La fonction :code:`get_parameters` permet au" -#~ " serveur d'obtenir les paramètres du " -#~ "client. Inversement, la fonction " -#~ ":code:`set_parameters` permet au serveur " -#~ "d'envoyer ses paramètres au client. " -#~ "Enfin, la fonction :code:`fit` forme le" -#~ " modèle localement pour le client, et" -#~ " la fonction :code:`evaluate` teste le " -#~ "modèle localement et renvoie les mesures" -#~ " correspondantes." +#~ "Une fois que le serveur fonctionne, " +#~ "nous pouvons démarrer les clients dans" +#~ " différents terminaux. Ouvre un nouveau " +#~ "terminal et démarre le premier client" +#~ " :" -#~ msgid "Starting the server" -#~ msgstr "Démarrer le serveur" +#~ msgid "Open another terminal and start the second client:" +#~ msgstr "Ouvre un autre terminal et démarre le deuxième client :" #~ msgid "" -#~ "Now that we have a way to " -#~ "instantiate clients, we need to create" -#~ " our server in order to aggregate " -#~ "the results. 
Using Flower, this can " -#~ "be done very easily by first " -#~ "choosing a strategy (here, we are " -#~ "using :code:`FedAvg`, which will define " -#~ "the global weights as the average " -#~ "of all the clients' weights at " -#~ "each round) and then using the " -#~ ":code:`flwr.server.start_server` function:" +#~ "Each client will have its own " +#~ "dataset. You should now see how " +#~ "the training does in the very " +#~ "first terminal (the one that started " +#~ "the server):" #~ msgstr "" -#~ "Maintenant que nous avons un moyen " -#~ "d'instancier les clients, nous devons " -#~ "créer notre serveur afin d'agréger les" -#~ " résultats. Avec Flower, cela peut " -#~ "être fait très facilement en choisissant" -#~ " d'abord une stratégie (ici, nous " -#~ "utilisons :code:`FedAvg`, qui définira les " -#~ "poids globaux comme la moyenne des " -#~ "poids de tous les clients à chaque" -#~ " tour) et en utilisant ensuite la " -#~ "fonction :code:`flwr.server.start_server` :" +#~ "Chaque client aura son propre ensemble" +#~ " de données. Tu devrais maintenant " +#~ "voir comment la formation se déroule " +#~ "dans le tout premier terminal (celui " +#~ "qui a démarré le serveur) :" #~ msgid "" -#~ "The :code:`weighted_average` function is there" -#~ " to provide a way to aggregate " -#~ "the metrics distributed amongst the " -#~ "clients (basically this allows us to " -#~ "display a nice average accuracy and " -#~ "loss for every round)." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in ``examples/sklearn-logreg-" +#~ "mnist``." #~ msgstr "" -#~ "La fonction :code:`weighted_average` est là" -#~ " pour fournir un moyen d'agréger les" -#~ " mesures réparties entre les clients " -#~ "(en gros, cela nous permet d'afficher" -#~ " une belle moyenne de précision et" -#~ " de perte pour chaque tour)." +#~ "Félicitations ! 
Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/sklearn-logreg-" +#~ "mnist`." -#~ msgid "Putting everything together" -#~ msgstr "Tout assembler" +#~ msgid "Federated XGBoost" +#~ msgstr "Formation fédérée" -#~ msgid "We can now start client instances using:" +#~ msgid "" +#~ "Indeed, as the demand for data " +#~ "privacy and decentralized learning grows, " +#~ "there's an increasing requirement to " +#~ "implement federated XGBoost systems for " +#~ "specialised applications, like survival " +#~ "analysis and financial fraud detection." #~ msgstr "" -#~ "Nous pouvons maintenant démarrer des " -#~ "instances de clients en utilisant :" #~ msgid "" -#~ "And they will be able to connect" -#~ " to the server and start the " -#~ "federated training." +#~ "Federated learning ensures that raw data" +#~ " remains on the local device, making" +#~ " it an attractive approach for " +#~ "sensitive domains where data security " +#~ "and privacy are paramount. Given the " +#~ "robustness and efficiency of XGBoost, " +#~ "combining it with federated learning " +#~ "offers a promising solution for these" +#~ " specific challenges." #~ msgstr "" -#~ "Et ils pourront se connecter au " -#~ "serveur et démarrer la formation " -#~ "fédérée." #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the `full code example " -#~ "`_ ." +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " ``xgboost`` package. We use a simple" +#~ " example (`full code xgboost-quickstart " +#~ "`_) with two *clients* and " +#~ "one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." 
#~ msgstr "" -#~ "Si tu veux voir tout ce qui " -#~ "est mis ensemble, tu devrais consulter" -#~ " l'exemple de code complet : " -#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" -#~ "huggingface](https://github.com/adap/flower/tree/main/examples" -#~ "/quickstart-huggingface)." #~ msgid "" -#~ "Of course, this is a very basic" -#~ " example, and a lot can be " -#~ "added or modified, it was just to" -#~ " showcase how simply we could " -#~ "federate a Hugging Face workflow using" -#~ " Flower." +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a :doc:`virtualenv " +#~ "`." #~ msgstr "" -#~ "Bien sûr, c'est un exemple très " -#~ "basique, et beaucoup de choses peuvent" -#~ " être ajoutées ou modifiées, il " -#~ "s'agissait juste de montrer avec quelle" -#~ " simplicité on pouvait fédérer un " -#~ "flux de travail Hugging Face à " -#~ "l'aide de Flower." +#~ "Tout d'abord, il est recommandé de " +#~ "créer un environnement virtuel et de " +#~ "tout exécuter au sein d'un `virtualenv" +#~ " `_." #~ msgid "" -#~ "Note that in this example we used" -#~ " :code:`PyTorch`, but we could have " -#~ "very well used :code:`TensorFlow`." +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. Now" +#~ " that we have all our dependencies" +#~ " installed, let's run a simple " +#~ "distributed training with two clients " +#~ "and one server." #~ msgstr "" -#~ "Notez que dans cet exemple, nous " -#~ "avons utilisé :code:`PyTorch`, mais nous " -#~ "aurions très bien pu utiliser " -#~ ":code:`TensorFlow`." #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with PyTorch Lightning to train an " -#~ "Auto Encoder model on MNIST." 
+#~ "In a file called ``client.py``, import" +#~ " xgboost, Flower, Flower Datasets and " +#~ "other related functions:" #~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "à PyTorch :" -#~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using PyTorch Lightning " -#~ "and Flower!" +#~ msgid "Dataset partition and hyper-parameter selection" #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant PyTorch Lightning et " -#~ "Flower !" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "Prior to local training, we require " +#~ "loading the HIGGS dataset from Flower" +#~ " Datasets and conduct data partitioning " +#~ "for FL:" #~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet `_ pour en " -#~ "savoir plus." #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with TensorFlow to train a MobilNetV2" -#~ " model on CIFAR-10." +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (``IidPartitioner(num_partitions=30)``). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ "``partition_id``:" #~ msgstr "" -#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for ``xgboost`` package." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en moins de 20 lignes de code" -#~ " !" -#~ msgid "Before Flower can be imported we have to install it:" -#~ msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" +#~ msgid "Finally, we define the hyper-parameters used for XGBoost training." 
+#~ msgstr "" #~ msgid "" -#~ "Since we want to use the Keras " -#~ "API of TensorFlow (TF), we have to" -#~ " install TF as well:" +#~ "The ``num_local_round`` represents the number" +#~ " of iterations for local tree boost." +#~ " We use CPU for the training in" +#~ " default. One can shift it to " +#~ "GPU by setting ``tree_method`` to " +#~ "``gpu_hist``. We use AUC as evaluation" +#~ " metric." #~ msgstr "" -#~ "Comme nous voulons utiliser l'API Keras" -#~ " de TensorFlow (TF), nous devons " -#~ "également installer TF :" -#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgid "Flower client definition for XGBoost" #~ msgstr "" -#~ "Ensuite, dans un fichier appelé " -#~ ":code:`client.py`, importe Flower et " -#~ "TensorFlow :" #~ msgid "" -#~ "We use the Keras utilities of TF" -#~ " to load CIFAR10, a popular colored" -#~ " image classification dataset for machine" -#~ " learning. The call to " -#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " -#~ "CIFAR10, caches it locally, and then " -#~ "returns the entire training and test " -#~ "set as NumPy ndarrays." +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define ``XgbClient`` " +#~ "class inherited from ``fl.client.Client``." #~ msgstr "" -#~ "Nous utilisons les utilitaires Keras de" -#~ " TF pour charger CIFAR10, un ensemble" -#~ " de données de classification d'images " -#~ "colorées populaire pour l'apprentissage " -#~ "automatique. L'appel à " -#~ ":code:`tf.keras.datasets.cifar10.load_data()` télécharge " -#~ "CIFAR10, le met en cache localement, " -#~ "puis renvoie l'ensemble d'entraînement et " -#~ "de test sous forme de NumPy " -#~ "ndarrays." #~ msgid "" -#~ "Next, we need a model. For the " -#~ "purpose of this tutorial, we use " -#~ "MobilNetV2 with 10 output classes:" +#~ "All required parameters defined above " +#~ "are passed to ``XgbClient``'s constructor." 
#~ msgstr "" -#~ "Ensuite, nous avons besoin d'un modèle." -#~ " Pour les besoins de ce tutoriel, " -#~ "nous utilisons MobilNetV2 avec 10 " -#~ "classes de sortie :" #~ msgid "" -#~ "The Flower server interacts with clients" -#~ " through an interface called " -#~ ":code:`Client`. When the server selects " -#~ "a particular client for training, it " -#~ "sends training instructions over the " -#~ "network. The client receives those " -#~ "instructions and calls one of the " -#~ ":code:`Client` methods to run your code" -#~ " (i.e., to train the neural network" -#~ " we defined earlier)." +#~ "Then, we override ``get_parameters``, ``fit``" +#~ " and ``evaluate`` methods insides " +#~ "``XgbClient`` class as follows." #~ msgstr "" -#~ "Le serveur Flower interagit avec les " -#~ "clients par le biais d'une interface " -#~ "appelée :code:`Client`. Lorsque le serveur " -#~ "sélectionne un client particulier pour " -#~ "la formation, il envoie des instructions" -#~ " de formation sur le réseau. Le " -#~ "client reçoit ces instructions et " -#~ "appelle l'une des méthodes :code:`Client` " -#~ "pour exécuter ton code (c'est-à-dire " -#~ "pour former le réseau neuronal que " -#~ "nous avons défini plus tôt)." #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses Keras." -#~ " The :code:`NumPyClient` interface defines " -#~ "three methods which can be implemented" -#~ " in the following way:" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use ``get_parameters`` and" +#~ " ``set_parameters`` to initialise model " +#~ "parameters for XGBoost. As a result, " +#~ "let's return an empty tensor in " +#~ "``get_parameters`` when it is called by" +#~ " the server at the first round." 
#~ msgstr "" -#~ "Flower fournit une classe de commodité" -#~ " appelée :code:`NumPyClient` qui facilite " -#~ "la mise en œuvre de l'interface " -#~ ":code:`Client` lorsque ta charge de " -#~ "travail utilise Keras. L'interface " -#~ ":code:`NumPyClient` définit trois méthodes qui" -#~ " peuvent être mises en œuvre de " -#~ "la manière suivante :" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`CifarClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "In ``fit``, at the first round, we" +#~ " call ``xgb.train()`` to build up the" +#~ " first set of trees. From the " +#~ "second round, we load the global " +#~ "model sent from server to new " +#~ "build Booster object, and then update" +#~ " model weights on local training data" +#~ " with function ``local_boost`` as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class ``XgbClient`` and add one" +#~ " line to actually run this client:" #~ msgstr "" #~ "Nous pouvons maintenant créer une " -#~ "instance de notre classe :code:`CifarClient`" +#~ "instance de notre classe :code:`MnistClient`" #~ " et ajouter une ligne pour exécuter" #~ " ce client :" #~ msgid "" #~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. If you implement" -#~ " a client of type :code:`NumPyClient` " -#~ "you'll need to first call its " -#~ ":code:`to_client()` method. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ " have to implement ``Client`` and " +#~ "call ``fl.client.start_client()``. The string " +#~ "``\"[::]:8080\"`` tells the client which " +#~ "server to connect to. 
In our case" +#~ " we can run the server and the" +#~ " client on the same machine, " +#~ "therefore we use ``\"[::]:8080\"``. If " #~ "we run a truly federated workload " #~ "with the server and clients running " #~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ " to change is the ``server_address`` " +#~ "we point the client at." #~ msgstr "" #~ "C'est tout pour le client. Il nous" #~ " suffit d'implémenter :code:`Client` ou " @@ -37689,785 +42019,991 @@ msgstr "" #~ " est l'adresse :code:`server_address` vers " #~ "laquelle nous dirigeons le client." -#~ msgid "Each client will have its own dataset." -#~ msgstr "Chaque client aura son propre ensemble de données." +#~ msgid "" +#~ "In a file named ``server.py``, import" +#~ " Flower and FedXgbBagging from " +#~ "``flwr.server.strategy``." +#~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "au MXNet :" + +#~ msgid "We first define a strategy for XGBoost bagging aggregation." +#~ msgstr "" #~ msgid "" -#~ "You should now see how the " -#~ "training does in the very first " -#~ "terminal (the one that started the " -#~ "server):" +#~ "We use two clients for this " +#~ "example. An ``evaluate_metrics_aggregation`` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The ``config_func`` function is " +#~ "to return the current FL round " +#~ "number to client's ``fit()`` and " +#~ "``evaluate()`` methods." +#~ msgstr "" + +#~ msgid "Then, we start the server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Tree-based bagging aggregation" +#~ msgstr "" + +#~ msgid "" +#~ "After traversal of all clients' models," +#~ " a new global model is generated, " +#~ "followed by the serialisation, and " +#~ "sending back to each client." 
#~ msgstr "" -#~ "Tu devrais maintenant voir comment la" -#~ " formation se déroule dans le tout" -#~ " premier terminal (celui qui a " -#~ "démarré le serveur) :" #~ msgid "" #~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this can be " -#~ "found in :code:`examples/quickstart-" -#~ "tensorflow/client.py`." +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in ``metrics_distributed``. One can" +#~ " see that the average AUC increases" +#~ " over FL rounds." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support :doc:`Flower simulation" +#~ " ` making it" +#~ " easy to simulate large client " +#~ "cohorts in a resource-aware manner. " +#~ "Let's take a look!" +#~ msgstr "" + +#~ msgid "" +#~ "To do this, we first customise a" +#~ " ``ClientManager`` in ``server_utils.py``:" +#~ msgstr "" + +#~ msgid "" +#~ "The customised ``ClientManager`` samples all" +#~ " available clients in each FL round" +#~ " based on the order of connection " +#~ "to the server. Then, we define a" +#~ " new strategy ``FedXgbCyclic`` in " +#~ "``flwr.server.strategy.fedxgb_cyclic.py``, in order " +#~ "to sequentially select only one client" +#~ " in given round and pass the " +#~ "received model to next client." 
+#~ msgstr "" + +#~ msgid "Customised data partitioning" +#~ msgstr "" + +#~ msgid "" +#~ "In ``dataset.py``, we have a function" +#~ " ``instantiate_partitioner`` to instantiate the" +#~ " data partitioner based on the given" +#~ " ``num_partitions`` and ``partitioner_type``. " +#~ "Currently, we provide four supported " +#~ "partitioner type to simulate the " +#~ "uniformity/non-uniformity in data quantity " +#~ "(uniform, linear, square, exponential)." +#~ msgstr "" + +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in ``server_utils.py``:" +#~ msgstr "" + +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a ``Booster`` " +#~ "object and loads the global model " +#~ "weights to it. The evaluation is " +#~ "conducted by calling ``eval_set()`` method," +#~ " and the tested AUC value is " +#~ "reported." +#~ msgstr "" + +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ "``evaluate()`` method insides the " +#~ "``XgbClient`` class in ``client_utils.py``." +#~ msgstr "" + +#~ msgid "" +#~ "We also provide an example code " +#~ "(``sim.py``) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." +#~ msgstr "" + +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a ``main()`` function to " +#~ "perform the simulation process:" +#~ msgstr "" + +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ "``list``. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." 
+#~ msgstr "" + +#~ msgid "Then, we define the strategies and other hyper-parameters:" +#~ msgstr "" + +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling ``fl.simulation.start_simulation``:" +#~ msgstr "" + +#~ msgid "" +#~ "One of key parameters for " +#~ "``start_simulation`` is ``client_fn`` which " +#~ "returns a function to construct a " +#~ "client. We define it as follows:" +#~ msgstr "" + +#~ msgid "Arguments parser" +#~ msgstr "" + +#~ msgid "" +#~ "In ``utils.py``, we define the arguments" +#~ " parsers for clients, server and " +#~ "simulation, allowing users to specify " +#~ "different experimental settings. Let's first" +#~ " see the sever side:" +#~ msgstr "" + +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ "``--centralised-eval``, the sever will do" +#~ " centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" + +#~ msgid "Then, the argument parser on client side:" +#~ msgstr "" + +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " ``--centralised-eval``, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting ``--scaled-lr``." #~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré. Le" -#~ " `code source complet " -#~ "`_ pour cela se trouve" -#~ " dans :code:`examples/quickstart-tensorflow/client.py`." 
-#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgid "We also have an argument parser for simulation:" #~ msgstr "" -#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgid "This integrates all arguments for both client and server sides." #~ msgstr "" -#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgid "" +#~ "To run a centralised evaluated " +#~ "experiment with bagging strategy on 5" +#~ " clients with exponential distribution for" +#~ " 50 rounds, we first start the " +#~ "server as below:" #~ msgstr "" -#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgid "Then, on each client terminal, we start the clients:" +#~ msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#~ msgid "To run the same experiment with Flower simulation:" #~ msgstr "" -#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgid "|ac0a9766e26044d6aea222a829859b20|" #~ msgstr "" -#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgid "|36cd6e248b1443ce8a82b5a025bba368|" #~ msgstr "" -#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" #~ msgstr "" -#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" #~ msgstr "" -#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgid "|7605632e1b0f49599ffacf841491fcfb|" #~ msgstr "" -#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgid "|91b1b5a7d3484eb7a2350c1923f18307|" #~ msgstr "" -#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgid "|5405ed430e4746e28b083b146fb71731|" #~ msgstr "" -#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgid "|a389e87dab394eb48a8949aa2397687b|" #~ msgstr "" -#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgid "|89c412136a5146ec8dc32c0973729f12|" #~ msgstr "" -#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgid "|9503d3dc3a144e8aa295f8800cd8a766|" #~ msgstr "" -#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" #~ msgstr "" -#~ msgid 
"|33cacb7d985c4906b348515c1a5cd993|" +#~ msgid "|a7579ad7734347508e959d9e14f2f53d|" #~ msgstr "" -#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgid "|73d15dd1d4fc41678b2d54815503fbe8|" #~ msgstr "" -#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgid "|55472eef61274ba1b739408607e109df|" #~ msgstr "" -#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgid "" +#~ "Run ``python3 src/py/flwr_tool/update_changelog.py " +#~ "`` in order to add every" +#~ " new change to the changelog (feel" +#~ " free to make manual changes to " +#~ "the changelog afterwards until it looks" +#~ " good)." #~ msgstr "" -#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgid "" +#~ "When operating in a production " +#~ "environment, it is strongly recommended " +#~ "to enable Transport Layer Security (TLS)" +#~ " for each Flower Component to ensure" +#~ " secure communication." #~ msgstr "" -#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgid "" +#~ "To enable TLS, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." #~ msgstr "" -#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container:" #~ msgstr "" -#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgid "" +#~ "``--volume ./certificates/:/app/certificates/:ro``: Mount" +#~ " the ``certificates`` directory in" #~ msgstr "" -#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgid "" +#~ "the current working directory of the " +#~ "host machine as a read-only volume" +#~ " at the" #~ msgstr "" -#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgid "``/app/certificates`` directory inside the container." 
#~ msgstr "" -#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgid "" +#~ "Assuming that the ``ca.crt`` certificate " +#~ "already exists locally, we can use " +#~ "the flag ``--volume`` to mount the " +#~ "local certificate into the container's " +#~ "``/app/`` directory." #~ msgstr "" -#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgid "" +#~ "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the " +#~ "``ca.crt`` file from the" #~ msgstr "" -#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgid "" +#~ "current working directory of the host" +#~ " machine as a read-only volume " +#~ "at the ``/app/ca.crt``" +#~ msgstr "" + +#~ msgid "SuperExec" #~ msgstr "" #~ msgid "" -#~ "Install `xz` (to install different " -#~ "Python versions) and `pandoc` to build" -#~ " the docs::" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory where" +#~ " the SuperExec will be executed from," +#~ " we can use the flag ``--volume`` " +#~ "to mount the local directory into " +#~ "the ``/app/certificates/`` directory of the" +#~ " container:" #~ msgstr "" #~ msgid "" -#~ "Ensure you system (Ubuntu 22.04+) is " -#~ "up-to-date, and you have all " -#~ "necessary packages::" +#~ ":substitution-code:`flwr/superexec:|stable_flwr_version|`: " +#~ "The name of the image to be " +#~ "run and the specific" +#~ msgstr "" + +#~ msgid "SuperExec." #~ msgstr "" #~ msgid "" -#~ "Let's create the Python environment for" -#~ " all-things Flower. If you wish " -#~ "to use :code:`pyenv`, we provide two " -#~ "convenience scripts that you can use." -#~ " If you prefer using something else" -#~ " than :code:`pyenv`, create a new " -#~ "environment, activate and skip to the" -#~ " last point where all packages are" -#~ " installed." +#~ "``--ssl-certfile certificates/server.pem``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" #~ msgid "" -#~ "If in a hurry, bypass the hook " -#~ "using ``--no-verify`` with the ``git " -#~ "commit`` command. 
::" +#~ "The ``certificates/server.pem`` file is used" +#~ " to identify the SuperExec and to " +#~ "encrypt the" #~ msgstr "" #~ msgid "" -#~ "Flower's documentation uses `Sphinx " -#~ "`_. There's no " -#~ "convenience script to re-build the " -#~ "documentation yet, but it's pretty " -#~ "easy::" +#~ "``--ssl-keyfile certificates/server.key``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" #~ msgid "" -#~ "Some quickstart examples may have " -#~ "limitations or requirements that prevent " -#~ "them from running on every environment." -#~ " For more information, please see " -#~ "`Limitations`_." +#~ "``--executor-config root-" +#~ "certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify" +#~ " the" #~ msgstr "" #~ msgid "" -#~ "Change the application code. For " -#~ "example, change the ``seed`` in " -#~ "``quickstart_docker/task.py`` to ``43`` and " -#~ "save it:" +#~ "location of the CA certificate file " +#~ "inside the container that the SuperExec" +#~ " executor" #~ msgstr "" -#~ msgid ":code:`fit`" -#~ msgstr ":code:`fit`" +#~ msgid "should use to verify the SuperLink's identity." +#~ msgstr "" #~ msgid "" -#~ "\\small\n" -#~ "\\frac{∆ \\times \\sqrt{2 \\times " -#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -#~ "\n" +#~ "In this mode, the ClientApp is " +#~ "executed as a subprocess within the " +#~ "SuperNode Docker container, rather than " +#~ "running in a separate container. This" +#~ " approach reduces the number of " +#~ "running containers, which can be " +#~ "beneficial for environments with limited " +#~ "resources. However, it also means that" +#~ " the ClientApp is no longer isolated" +#~ " from the SuperNode, which may " +#~ "introduce additional security concerns." #~ msgstr "" -#~ msgid "Enable node authentication in :code:`SuperLink`" +#~ msgid "" +#~ "Before running the ClientApp as a " +#~ "subprocess, ensure that the FAB " +#~ "dependencies have been installed in the" +#~ " SuperNode images. 
This can be done" +#~ " by extending the SuperNode image:" #~ msgstr "" +#~ msgid "Dockerfile.supernode" +#~ msgstr "Serveur de Flower" + #~ msgid "" -#~ "To enable node authentication, first you" -#~ " need to configure SSL/TLS connections " -#~ "to secure the SuperLink<>SuperNode " -#~ "communication. You can find the complete" -#~ " guide `here `_. After " -#~ "configuring secure connections, you can " -#~ "enable client authentication in a " -#~ "long-running Flower :code:`SuperLink`. Use " -#~ "the following terminal command to start" -#~ " a Flower :code:`SuperNode` that has " -#~ "both secure connections and node " -#~ "authentication enabled:" +#~ "Next, build the SuperNode Docker image" +#~ " by running the following command in" +#~ " the directory where Dockerfile is " +#~ "located:" #~ msgstr "" -#~ msgid "" -#~ "The first flag :code:`--auth-list-" -#~ "public-keys` expects a path to a " -#~ "CSV file storing all known node " -#~ "public keys. You need to store all" -#~ " known node public keys that are " -#~ "allowed to participate in a federation" -#~ " in one CSV file (:code:`.csv`)." +#~ msgid "Run the ClientApp as a Subprocess" #~ msgstr "" #~ msgid "" -#~ "The second and third flags :code" -#~ ":`--auth-superlink-private-key` and :code" -#~ ":`--auth-superlink-public-key` expect paths" -#~ " to the server's private and public" -#~ " keys. For development purposes, you " -#~ "can generate a private and public " -#~ "key pair using :code:`ssh-keygen -t " -#~ "ecdsa -b 384`." 
+#~ "Start the SuperNode with the flag " +#~ "``--isolation subprocess``, which tells the" +#~ " SuperNode to execute the ClientApp " +#~ "as a subprocess:" #~ msgstr "" -#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgid "Run the example and follow the logs of the ServerApp:" #~ msgstr "" #~ msgid "" -#~ "Similar to the long-running Flower " -#~ "server (:code:`SuperLink`), you can easily " -#~ "enable node authentication in the " -#~ "long-running Flower client (:code:`SuperNode`)." -#~ " Use the following terminal command " -#~ "to start an authenticated :code:`SuperNode`:" +#~ "That is all it takes! You can " +#~ "monitor the progress of the run " +#~ "through the logs of the SuperExec." #~ msgstr "" #~ msgid "" -#~ "The :code:`--auth-supernode-private-key` " -#~ "flag expects a path to the node's" -#~ " private key file and the :code" -#~ ":`--auth-supernode-public-key` flag expects" -#~ " a path to the node's public " -#~ "key file. For development purposes, you" -#~ " can generate a private and public" -#~ " key pair using :code:`ssh-keygen -t" -#~ " ecdsa -b 384`." +#~ "You will learn how to run the " +#~ "Flower client and server components on" +#~ " two separate machines, with Flower " +#~ "configured to use TLS encryption and " +#~ "persist SuperLink state across restarts. " +#~ "A server consists of a SuperLink " +#~ "and ``SuperExec``. For more details " +#~ "about the Flower architecture, refer to" +#~ " the :doc:`../explanation-flower-architecture`" +#~ " explainer page." #~ msgstr "" #~ msgid "" -#~ "You should now have learned how to" -#~ " start a long-running Flower server" -#~ " (:code:`SuperLink`) and client " -#~ "(:code:`SuperNode`) with node authentication " -#~ "enabled. You should also know the " -#~ "significance of the private key and " -#~ "store it safely to minimize security " -#~ "risks." 
+#~ "First, set the environment variables " +#~ "``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the" +#~ " IP address from the remote machine." +#~ " For example, if the IP is " +#~ "``192.168.2.33``, execute:" #~ msgstr "" #~ msgid "" -#~ "If you have not added ``conda-" -#~ "forge`` to your channels, you will " -#~ "first need to run the following::" +#~ "Log into the remote machine using " +#~ "``ssh`` and run the following command" +#~ " to start the SuperLink and SuperExec" +#~ " services:" #~ msgstr "" #~ msgid "" -#~ "Once the ``conda-forge`` channel has " -#~ "been enabled, ``flwr`` can be installed" -#~ " with ``conda``::" +#~ "Specify the remote SuperExec IP " +#~ "addresses and the path to the root" +#~ " certificate in the ``[tool.flwr.federations" +#~ ".remote-superexec]`` table in the " +#~ "``pyproject.toml`` file. Here, we have " +#~ "named our remote federation ``remote-" +#~ "superexec``:" #~ msgstr "" -#~ msgid "or with ``mamba``::" +#~ msgid "Run the project and follow the ServerApp logs:" #~ msgstr "" #~ msgid "" -#~ "For central DP with server-side " -#~ "clipping, there are two :code:`Strategy` " -#~ "classes that act as wrappers around " -#~ "the actual :code:`Strategy` instance (for " -#~ "example, :code:`FedAvg`). The two wrapper " -#~ "classes are " -#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " -#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " -#~ "fixed and adaptive clipping." +#~ "``-p 9091:9091 -p 9092:9092``: Map port" +#~ " ``9091`` and ``9092`` of the " +#~ "container to the same port of" +#~ msgstr "" + +#~ msgid "the host machine, allowing other services to access the Driver API on" #~ msgstr "" #~ msgid "" -#~ "The code sample below enables the " -#~ ":code:`FedAvg` strategy to use server-" -#~ "side fixed clipping using the " -#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " -#~ "class. 
The same approach can be " -#~ "used with " -#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " -#~ "adjusting the corresponding input parameters." +#~ "``http://localhost:9091`` and the Fleet API" +#~ " on ``http://localhost:9092``." #~ msgstr "" #~ msgid "" -#~ "For central DP with client-side " -#~ "clipping, the server sends the clipping" -#~ " value to selected clients on each" -#~ " round. Clients can use existing " -#~ "Flower :code:`Mods` to perform the " -#~ "clipping. Two mods are available for " -#~ "fixed and adaptive client-side clipping:" -#~ " :code:`fixedclipping_mod` and " -#~ ":code:`adaptiveclipping_mod` with corresponding " -#~ "server-side wrappers " -#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " -#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +#~ "``flwr/supernode:|stable_flwr_version|``: This is " +#~ "the name of the image to be " +#~ "run and the specific tag" #~ msgstr "" #~ msgid "" -#~ "The code sample below enables the " -#~ ":code:`FedAvg` strategy to use differential" -#~ " privacy with client-side fixed " -#~ "clipping using both the " -#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " -#~ "class and, on the client, " -#~ ":code:`fixedclipping_mod`:" +#~ "``--supernode-address 0.0.0.0:9094``: Set the" +#~ " address and port number that the " +#~ "SuperNode" +#~ msgstr "" + +#~ msgid "is listening on." +#~ msgstr "" + +#~ msgid "Step 4: Start the ClientApp" #~ msgstr "" #~ msgid "" -#~ "In addition to the server-side " -#~ "strategy wrapper, the :code:`ClientApp` needs" -#~ " to configure the matching " -#~ ":code:`fixedclipping_mod` to perform the " -#~ "client-side clipping:" +#~ "The ClientApp Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own ClientApp image. 
In" +#~ " order to install the FAB " +#~ "dependencies, you will need to create" +#~ " a Dockerfile that extends the " +#~ "ClientApp image and installs the " +#~ "required dependencies." #~ msgstr "" -#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#~ msgid "" +#~ "Create a ClientApp Dockerfile called " +#~ "``Dockerfile.clientapp`` and paste the " +#~ "following code into it:" #~ msgstr "" +#~ msgid "Dockerfile.clientapp" +#~ msgstr "Flower ClientApp." + #~ msgid "" -#~ "Note that since version :code:`1.11.0`, " -#~ ":code:`flower-server-app` no longer " -#~ "supports passing a reference to a " -#~ "`ServerApp` attribute. Instead, you need " -#~ "to pass the path to Flower app " -#~ "via the argument :code:`--app`. This is" -#~ " the path to a directory containing" -#~ " a `pyproject.toml`. You can create a" -#~ " valid Flower app by executing " -#~ ":code:`flwr new` and following the " -#~ "prompt." +#~ "to be built from is the " +#~ "``flwr/clientapp image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this information beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ "``--supernode supernode-1:9094``: Connect to " +#~ "the SuperNode's Fleet API at the " +#~ "address" +#~ msgstr "" + +#~ msgid "``supernode-1:9094``." #~ msgstr "" #~ msgid "" -#~ "Prior to local training, we need " -#~ "to load the MNIST dataset, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning, " -#~ "and partition the dataset for FL. 
" -#~ "This can be conveniently achieved using" -#~ " `Flower Datasets `_." -#~ " The :code:`FederatedDataset.load_partition()` method" -#~ " loads the partitioned training set " -#~ "for each partition ID defined in " -#~ "the :code:`--partition-id` argument." +#~ "The procedure for building and running" +#~ " a SuperExec image is almost " +#~ "identical to the ClientApp image." #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a federated XGBoost model" -#~ " on HIGGS dataset using Flower and" -#~ " :code:`xgboost` package. We use a " -#~ "simple example (`full code xgboost-" -#~ "quickstart `_) with two *clients* " -#~ "and one *server* to demonstrate how " -#~ "federated XGBoost works, and then we " -#~ "dive into a more complex example " -#~ "(`full code xgboost-comprehensive " -#~ "`_) to run various experiments." +#~ "Similar to the ClientApp image, you " +#~ "will need to create a Dockerfile " +#~ "that extends the SuperExec image and " +#~ "installs the required FAB dependencies." #~ msgstr "" #~ msgid "" -#~ "In this example, we split the " -#~ "dataset into 30 partitions with uniform" -#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." -#~ " Then, we load the partition for " -#~ "the given client based on " -#~ ":code:`partition_id`:" +#~ "Create a SuperExec Dockerfile called " +#~ "``Dockerfile.superexec`` and paste the " +#~ "following code in:" +#~ msgstr "" + +#~ msgid "Dockerfile.superexec" #~ msgstr "" #~ msgid "" -#~ "After that, we do train/test splitting" -#~ " on the given partition (client's " -#~ "local data), and transform data format" -#~ " for :code:`xgboost` package." 
+#~ ":substitution-code:`FROM " +#~ "flwr/superexec:|stable_flwr_version|`: This line " +#~ "specifies that the Docker image" #~ msgstr "" #~ msgid "" -#~ "The functions of :code:`train_test_split` and" -#~ " :code:`transform_dataset_to_dmatrix` are defined " -#~ "as below:" +#~ "to be built from is the " +#~ "``flwr/superexec image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" #~ msgid "" -#~ "The :code:`num_local_round` represents the " -#~ "number of iterations for local tree " -#~ "boost. We use CPU for the training" -#~ " in default. One can shift it " -#~ "to GPU by setting :code:`tree_method` to" -#~ " :code:`gpu_hist`. We use AUC as " -#~ "evaluation metric." +#~ "``ENTRYPOINT [\"flower-superexec\"``: Set the" +#~ " command ``flower-superexec`` to be" +#~ msgstr "" + +#~ msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +#~ msgstr "" + +#~ msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." #~ msgstr "" #~ msgid "" -#~ "After loading the dataset we define " -#~ "the Flower client. We follow the " -#~ "general rule to define :code:`XgbClient` " -#~ "class inherited from :code:`fl.client.Client`." +#~ "Afterward, in the directory that holds" +#~ " the Dockerfile, execute this Docker " +#~ "command to build the SuperExec image:" #~ msgstr "" #~ msgid "" -#~ "All required parameters defined above " -#~ "are passed to :code:`XgbClient`'s constructor." +#~ "``-p 9093:9093``: Map port ``9093`` of" +#~ " the container to the same port " +#~ "of" #~ msgstr "" #~ msgid "" -#~ "Then, we override :code:`get_parameters`, " -#~ ":code:`fit` and :code:`evaluate` methods " -#~ "insides :code:`XgbClient` class as follows." +#~ "the host machine, allowing you to " +#~ "access the SuperExec API on " +#~ "``http://localhost:9093``." +#~ msgstr "" + +#~ msgid "``--name superexec``: Assign the name ``superexec`` to the container." 
#~ msgstr "" #~ msgid "" -#~ "Unlike neural network training, XGBoost " -#~ "trees are not started from a " -#~ "specified random weights. In this case," -#~ " we do not use :code:`get_parameters` " -#~ "and :code:`set_parameters` to initialise model" -#~ " parameters for XGBoost. As a result," -#~ " let's return an empty tensor in " -#~ ":code:`get_parameters` when it is called " -#~ "by the server at the first round." +#~ "``flwr_superexec:0.0.1``: This is the name " +#~ "of the image to be run and " +#~ "the specific tag" #~ msgstr "" #~ msgid "" -#~ "In :code:`fit`, at the first round, " -#~ "we call :code:`xgb.train()` to build up" -#~ " the first set of trees. From " -#~ "the second round, we load the " -#~ "global model sent from server to " -#~ "new build Booster object, and then " -#~ "update model weights on local training" -#~ " data with function :code:`local_boost` as" -#~ " follows:" +#~ "``--executor-config superlink=\\\"superlink:9091\\\"``:" +#~ " Configure the SuperExec executor to" +#~ msgstr "" + +#~ msgid "connect to the SuperLink running on port ``9091``." +#~ msgstr "" + +#~ msgid "Stop the current ClientApp containers:" +#~ msgstr "" + +#~ msgid "Launch two new ClientApp containers based on the newly built image:" #~ msgstr "" #~ msgid "" -#~ "Given :code:`num_local_round`, we update trees" -#~ " by calling :code:`bst_input.update` method. " -#~ "After training, the last " -#~ ":code:`N=num_local_round` trees will be " -#~ "extracted to send to the server." +#~ "Setting the ``PROJECT_DIR`` helps Docker " +#~ "Compose locate the ``pyproject.toml`` file," +#~ " allowing it to install dependencies " +#~ "in the SuperExec and SuperNode images" +#~ " correctly." #~ msgstr "" #~ msgid "" -#~ "In :code:`evaluate`, after loading the " -#~ "global model, we call :code:`bst.eval_set` " -#~ "function to conduct evaluation on valid" -#~ " set. The AUC value will be " -#~ "returned." 
+#~ "To ensure the ``flwr`` CLI connects " +#~ "to the SuperExec, you need to " +#~ "specify the SuperExec addresses in the" +#~ " ``pyproject.toml`` file." #~ msgstr "" #~ msgid "" -#~ "We use two clients for this " -#~ "example. An :code:`evaluate_metrics_aggregation` " -#~ "function is defined to collect and " -#~ "wighted average the AUC values from " -#~ "clients. The :code:`config_func` function is" -#~ " to return the current FL round " -#~ "number to client's :code:`fit()` and " -#~ ":code:`evaluate()` methods." +#~ "Run the quickstart example, monitor the" +#~ " ServerApp logs and wait for the " +#~ "summary to appear:" +#~ msgstr "" + +#~ msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +#~ msgstr "" + +#~ msgid "Step 7: Add another SuperNode" #~ msgstr "" #~ msgid "" -#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," -#~ " we define :code:`FedXgbBagging` inherited " -#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" -#~ " override the :code:`aggregate_fit`, " -#~ ":code:`aggregate_evaluate` and :code:`evaluate` " -#~ "methods as follows:" +#~ "You can add more SuperNodes and " +#~ "ClientApps by duplicating their definitions" +#~ " in the ``compose.yml`` file." #~ msgstr "" #~ msgid "" -#~ "In :code:`aggregate_fit`, we sequentially " -#~ "aggregate the clients' XGBoost trees by" -#~ " calling :code:`aggregate()` function:" +#~ "Just give each new SuperNode and " +#~ "ClientApp service a unique service name" +#~ " like ``supernode-3``, ``clientapp-3``, etc." +#~ msgstr "" + +#~ msgid "In ``compose.yml``, add the following:" #~ msgstr "" #~ msgid "" -#~ "In this function, we first fetch " -#~ "the number of trees and the number" -#~ " of parallel trees for the current" -#~ " and previous model by calling " -#~ ":code:`_get_tree_nums`. Then, the fetched " -#~ "information will be aggregated. After " -#~ "that, the trees (containing model " -#~ "weights) are aggregated to generate a" -#~ " new tree model." 
+#~ "If you also want to enable TLS " +#~ "for the new SuperNodes, duplicate the" +#~ " SuperNode definition for each new " +#~ "SuperNode service in the ``with-" +#~ "tls.yml`` file." #~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated XGBoost " -#~ "system. The AUC values can be " -#~ "checked in :code:`metrics_distributed`. One " -#~ "can see that the average AUC " -#~ "increases over FL rounds." +#~ "Make sure that the names of the" +#~ " services match with the one in " +#~ "the ``compose.yml`` file." +#~ msgstr "" + +#~ msgid "In ``with-tls.yml``, add the following:" +#~ msgstr "" + +#~ msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" #~ msgstr "" #~ msgid "" -#~ "To do this, we first customise a" -#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ "This guide is for users who have" +#~ " already worked with Flower 0.x and" +#~ " want to upgrade to Flower 1.0. " +#~ "Newer versions of Flower (1.12+) are " +#~ "based on a new architecture (previously" +#~ " called Flower Next) and not covered" +#~ " in this guide. After upgrading " +#~ "Flower 0.x projects to Flower 1.0, " +#~ "please refer to :doc:`Upgrade to Flower" +#~ " Next ` to make your project compatible" +#~ " with the lastest version of Flower." #~ msgstr "" +#~ msgid "Upgrade to Flower Next" +#~ msgstr "Passe à Flower 1.0" + #~ msgid "" -#~ "The customised :code:`ClientManager` samples " -#~ "all available clients in each FL " -#~ "round based on the order of " -#~ "connection to the server. Then, we " -#~ "define a new strategy :code:`FedXgbCyclic` " -#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " -#~ "order to sequentially select only one" -#~ " client in given round and pass " -#~ "the received model to next client." +#~ "Welcome to the migration guide for " +#~ "updating Flower to Flower Next! 
Whether" +#~ " you're a seasoned user or just " +#~ "getting started, this guide will help" +#~ " you smoothly transition your existing " +#~ "setup to take advantage of the " +#~ "latest features and improvements in " +#~ "Flower Next, starting from version 1.8." #~ msgstr "" #~ msgid "" -#~ "Unlike the original :code:`FedAvg`, we " -#~ "don't perform aggregation here. Instead, " -#~ "we just make a copy of the " -#~ "received client model as global model" -#~ " by overriding :code:`aggregate_fit`." +#~ "This guide shows how to reuse " +#~ "pre-``1.8`` Flower code with minimum " +#~ "code changes by using the *compatibility" +#~ " layer* in Flower Next. In another" +#~ " guide, we will show how to run" +#~ " Flower Next end-to-end with " +#~ "pure Flower Next APIs." #~ msgstr "" +#~ msgid "or if you need Flower Next with simulation:" +#~ msgstr "" + +#~ msgid "Using Poetry" +#~ msgstr "Utiliser la poésie (recommandé)" + #~ msgid "" -#~ "Also, the customised :code:`configure_fit` and" -#~ " :code:`configure_evaluate` methods ensure the" -#~ " clients to be sequentially selected " -#~ "given FL round:" +#~ "Update the ``flwr`` dependency in " +#~ "``pyproject.toml`` and then reinstall (don't" +#~ " forget to delete ``poetry.lock`` via " +#~ "``rm poetry.lock`` before running ``poetry " +#~ "install``)." #~ msgstr "" +#~ "Poetry : mettez à jour la " +#~ "dépendance ``flwr`` dans ``pyproject.toml`` " +#~ "puis réinstallez (n'oubliez pas de " +#~ "supprimer ``poetry.lock`` via ``rm " +#~ "poetry.lock`` avant d'exécuter ``poetry " +#~ "install``)." #~ msgid "" -#~ "In :code:`dataset.py`, we have a " -#~ "function :code:`instantiate_partitioner` to " -#~ "instantiate the data partitioner based " -#~ "on the given :code:`num_partitions` and " -#~ ":code:`partitioner_type`. Currently, we provide " -#~ "four supported partitioner type to " -#~ "simulate the uniformity/non-uniformity in " -#~ "data quantity (uniform, linear, square, " -#~ "exponential)." 
+#~ "Ensure you set the following version " +#~ "constraint in your ``pyproject.toml``:" +#~ msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#~ msgid "" +#~ "In Flower Next, the *infrastructure* and" +#~ " *application layers* have been decoupled." +#~ " Instead of starting a client in " +#~ "code via ``start_client()``, you create " +#~ "a |clientapp_link|_ and start it via " +#~ "the command line. Instead of starting" +#~ " a server in code via " +#~ "``start_server()``, you create a " +#~ "|serverapp_link|_ and start it via the" +#~ " command line. The long-running " +#~ "components of server and client are " +#~ "called SuperLink and SuperNode. The " +#~ "following non-breaking changes that " +#~ "require manual updates and allow you " +#~ "to run your project both in the" +#~ " traditional way and in the Flower" +#~ " Next way:" #~ msgstr "" #~ msgid "" -#~ "To facilitate centralised evaluation, we " -#~ "define a function in :code:`server_utils.py`:" +#~ "Wrap your existing client with " +#~ "|clientapp_link|_ instead of launching it " +#~ "via |startclient_link|_. Here's an example:" #~ msgstr "" #~ msgid "" -#~ "This function returns a evaluation " -#~ "function which instantiates a :code:`Booster`" -#~ " object and loads the global model" -#~ " weights to it. The evaluation is " -#~ "conducted by calling :code:`eval_set()` " -#~ "method, and the tested AUC value " -#~ "is reported." +#~ "Wrap your existing strategy with " +#~ "|serverapp_link|_ instead of starting the " +#~ "server via |startserver_link|_. Here's an " +#~ "example:" #~ msgstr "" #~ msgid "" -#~ "As for distributed evaluation on the " -#~ "clients, it's same as the quick-" -#~ "start example by overriding the " -#~ ":code:`evaluate()` method insides the " -#~ ":code:`XgbClient` class in :code:`client_utils.py`." 
+#~ "Run the ``SuperLink`` using " +#~ "|flowernext_superlink_link|_ before running, in " +#~ "sequence, |flowernext_clientapp_link|_ (2x) and " +#~ "|flowernext_serverapp_link|_. There is no need" +#~ " to execute `client.py` and `server.py` " +#~ "as Python scripts." #~ msgstr "" #~ msgid "" -#~ "We also provide an example code " -#~ "(:code:`sim.py`) to use the simulation " -#~ "capabilities of Flower to simulate " -#~ "federated XGBoost training on either a" -#~ " single machine or a cluster of " -#~ "machines." +#~ "Here's an example to start the " +#~ "server without HTTPS (only for " +#~ "prototyping):" #~ msgstr "" #~ msgid "" -#~ "After importing all required packages, " -#~ "we define a :code:`main()` function to" -#~ " perform the simulation process:" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--ssl-ca-certfile``," +#~ " ``--ssl-certfile``, and ``--ssl-keyfile``" +#~ " command line options to pass paths" +#~ " to (CA certificate, server certificate," +#~ " and server private key)." #~ msgstr "" #~ msgid "" -#~ "We first load the dataset and " -#~ "perform data partitioning, and the " -#~ "pre-processed data is stored in a " -#~ ":code:`list`. After the simulation begins, " -#~ "the clients won't need to pre-" -#~ "process their partitions again." +#~ "Wrap your existing client and strategy" +#~ " with |clientapp_link|_ and |serverapp_link|_," +#~ " respectively. There is no need to" +#~ " use |startsim_link|_ anymore. Here's an" +#~ " example:" #~ msgstr "" #~ msgid "" -#~ "After that, we start the simulation " -#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ "Run |flower_simulation_link|_ in CLI and " +#~ "point to the ``server_app`` / " +#~ "``client_app`` object in the code " +#~ "instead of executing the Python script." 
+#~ " Here's an example (assuming the " +#~ "``server_app`` and ``client_app`` objects are" +#~ " in a ``sim.py`` module):" #~ msgstr "" #~ msgid "" -#~ "One of key parameters for " -#~ ":code:`start_simulation` is :code:`client_fn` which" -#~ " returns a function to construct a" -#~ " client. We define it as follows:" +#~ "Set default resources for each " +#~ "|clientapp_link|_ using the ``--backend-" +#~ "config`` command line argument instead " +#~ "of setting the ``client_resources`` argument" +#~ " in |startsim_link|_. Here's an example:" +#~ msgstr "" + +#~ msgid "Simulation in a Notebook" #~ msgstr "" #~ msgid "" -#~ "In :code:`utils.py`, we define the " -#~ "arguments parsers for clients, server " -#~ "and simulation, allowing users to " -#~ "specify different experimental settings. Let's" -#~ " first see the sever side:" +#~ "Run |runsim_link|_ in your notebook " +#~ "instead of |startsim_link|_. Here's an " +#~ "example:" #~ msgstr "" #~ msgid "" -#~ "This allows user to specify training " -#~ "strategies / the number of total " -#~ "clients / FL rounds / participating " -#~ "clients / clients for evaluation, and" -#~ " evaluation fashion. Note that with " -#~ ":code:`--centralised-eval`, the sever will " -#~ "do centralised evaluation and all " -#~ "functionalities for client evaluation will " -#~ "be disabled." +#~ "As we continuously enhance Flower Next" +#~ " at a rapid pace, we'll be " +#~ "periodically updating this guide. Please " +#~ "feel free to share any feedback " +#~ "with us!" #~ msgstr "" #~ msgid "" -#~ "This defines various options for client" -#~ " data partitioning. Besides, clients also" -#~ " have an option to conduct evaluation" -#~ " on centralised test set by setting" -#~ " :code:`--centralised-eval`, as well as " -#~ "an option to perform scaled learning " -#~ "rate based on the number of " -#~ "clients by setting :code:`--scaled-lr`." +#~ "This function is deprecated since " +#~ "1.13.0. 
Use :code: `flwr run` to " +#~ "start a Flower simulation." #~ msgstr "" -#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgid "|c9344c3dfee24383908fabaac40a8504|" #~ msgstr "" -#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgid "|c10cd8f2177641bd8091c7b76d318ff9|" #~ msgstr "" -#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgid "|3c59c315e67945ea8b839381c5deb6c2|" #~ msgstr "" -#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgid "|eadf87e1e20549789512f7aa9199fcff|" #~ msgstr "" -#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgid "|66ce8f21aeb443fca1fc88f727458417|" #~ msgstr "" -#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgid "|f5768015a1014396b4761bb6cb3677f5|" #~ msgstr "" -#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgid "|a746aa3f56064617a4e00f4c6a0cb140|" #~ msgstr "" -#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgid "|cf8f676dd3534a44995c1b40910fd030|" #~ msgstr "" -#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgid "|d1c0e3a4c9dc4bfd88ee6f1fe626edaf|" #~ msgstr "" -#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgid "|1d8d6298a4014ec3a717135bcc7a94f9|" #~ msgstr "" -#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgid "|e3ea79200ff44d459358b9f4713e582b|" #~ msgstr "" -#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgid "|3e1061718a4a49d485764d30a4bfecdd|" #~ msgstr "" -#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgid "|7750e597d1ea4e319f7e0a40539bf214|" #~ msgstr "" -#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgid "|dd4434075f374e99ac07f509a883778f|" #~ msgstr "" -#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgid "Other changes" +#~ msgstr "Changements incompatibles" + +#~ msgid "|cf5fe148406b44b9a8b842fb01b5a7ea|" #~ msgstr "" -#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgid "|ba25c91426d64cc1ae2d3febc5715b35|" #~ msgstr "" -#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgid "|fca67f83aaab4389aa9ebb4d9c5cd75e|" #~ msgstr "" -#~ msgid 
"|d62da263071d45a496f543e41fce3a19|" +#~ msgid "|6f2e8f95c95443379b0df00ca9824654|" #~ msgstr "" -#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgid "|c0ab3a1a733d4dbc9e1677aa608e8038|" #~ msgstr "" -#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgid "|8f0491bde07341ab9f2e23d50593c0be|" #~ msgstr "" -#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgid "|762fc099899943688361562252c5e600|" #~ msgstr "" -#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgid "|f62d365fd0ae405b975d3ca01e7183fd|" #~ msgstr "" -#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgid "|2c78fc1816b143289f4d909388f92a80|" #~ msgstr "" -#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgid "|4230725aeebe497d8ad84a3efc2a912b|" #~ msgstr "" -#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgid "|64b66a88417240eabe52f5cc55d89d0b|" #~ msgstr "" -#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgid "|726c8eca58bc4f859b06aa24a587b253|" #~ msgstr "" -#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgid "|f9d869e4b33c4093b29cf24ed8dff80a|" #~ msgstr "" -#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgid "|4ab50bc01a9f426a91a2c0cbc3ab7a84|" #~ msgstr "" -#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgid "Request for Flower Baselines" +#~ msgstr "Demande pour une nouvelle Flower Baseline" + +#~ msgid "Request for examples" +#~ msgstr "Demande pour un nouveau Flower Example" + +#~ msgid "|f150b8d6e0074250822c9f6f7a8de3e0|" #~ msgstr "" -#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgid "|72772d10debc4abd8373c0bc82985422|" #~ msgstr "" -#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgid "|5815398552ad41d290a3a2631fe8f6ca|" #~ msgstr "" -#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgid "|e6ac20744bf149378be20ac3dc309356|" #~ msgstr "" -#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgid "|a4011ef443c14725b15a8cf33b0e3443|" #~ msgstr "" -#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgid "|a22faa3617404c06803731525e1c609f|" #~ msgstr "" -#~ 
msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgid "|84a5c9b5041c43c3beab9786197c3e4e|" #~ msgstr "" -#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgid "|b5c4be0b52d4493ba8c4af14d7c2db97|" #~ msgstr "" -#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgid "|c1c784183d18481186ff65dc261d1335|" #~ msgstr "" -#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgid "|669fcd1f44ab42f5bbd196c3cf1ecbc2|" #~ msgstr "" -#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgid "|edfb08758c9441afb6736045a59e154c|" #~ msgstr "" -#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgid "|82338b8bbad24d5ea9df3801aab37852|" #~ msgstr "" -#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgid "|518d994dd2c844898b441da03b858326|" #~ msgstr "" -#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgid "|7bfcfcb57ae5403f8e18486f45ca48b4|" #~ msgstr "" diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index 424eaf5f86a2..810bc10e99b3 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-10-10 00:29+0000\n" +"POT-Creation-Date: 2024-11-30 00:31+0000\n" "PO-Revision-Date: 2024-08-23 13:09+0000\n" "Last-Translator: Seulki Yun \n" "Language: ko\n" @@ -956,10 +956,11 @@ msgstr "" "순서대로 수행되어야 합니다:" #: ../../source/contributor-how-to-release-flower.rst:13 +#, fuzzy msgid "" -"Run ``python3 src/py/flwr_tool/update_changelog.py `` in " -"order to add every new change to the changelog (feel free to make manual " -"changes to the changelog afterwards until it looks good)." +"Run ``python3 ./dev/update_changelog.py `` in order to add" +" every new change to the changelog (feel free to make manual changes to " +"the changelog afterwards until it looks good)." 
msgstr "" "모든 새로운 변경 사항을 변경 로그에 추가하기 위해``python3 " "src/py/flwr_tool/update_changelog.py ``을 실행합니다 (변경 로그가 " @@ -1325,10 +1326,10 @@ msgid "Where to start" msgstr "시작 위치" #: ../../source/contributor-ref-good-first-contributions.rst:11 +#, fuzzy msgid "" -"Until the Flower core library matures it will be easier to get PR's " -"accepted if they only touch non-core areas of the codebase. Good " -"candidates to get started are:" +"In general, it is easier to get PR's accepted if they only touch non-core" +" areas of the codebase. Good candidates to get started are:" msgstr "" "Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 것이 PR을 승인받기가 더 쉬울 것입니다. " "시작하기에 좋은 후보자는 다음과 같습니다:" @@ -1338,116 +1339,132 @@ msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "문서: 무엇이 누락되었나요? 무엇을 더 명확하게 표현할 수 있을까요?" #: ../../source/contributor-ref-good-first-contributions.rst:15 +#, python-format +msgid "" +"Open issues: Issues with the tag `good first issue " +"`_." +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:17 msgid "Baselines: See below." msgstr "Baselines: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:16 +#: ../../source/contributor-ref-good-first-contributions.rst:18 msgid "Examples: See below." msgstr "예시: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:19 -msgid "Request for Flower Baselines" +#: ../../source/contributor-ref-good-first-contributions.rst:21 +#, fuzzy +msgid "Flower Baselines" msgstr "Flower Baselines 요청" -#: ../../source/contributor-ref-good-first-contributions.rst:21 +#: ../../source/contributor-ref-good-first-contributions.rst:23 +#, fuzzy msgid "" -"If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines " -"`_." +"If you are not familiar with Flower Baselines, please check our " +"`contributing guide for baselines `_." msgstr "" "Flower Baseline에 익숙하지 않다면 ' Baseline 기여 가이드 " "`_를 " "확인해보세요." 
-#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:26 +#, fuzzy msgid "" -"You should then check out the open `issues " +"Then take a look at the open `issues " "`_" -" for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignees, feel free to assign it to yourself and start " -"working on it!" +" for baseline requests. If you find a baseline that you'd like to work " +"on, and it has no assignees, feel free to assign it to yourself and get " +"started!" msgstr "" "그런 다음 오픈 된 `issues " "`_에서" " baseline 요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, 자유롭게 자신에게 할당하고 작업을 " "시작하세요!" -#: ../../source/contributor-ref-good-first-contributions.rst:30 +#: ../../source/contributor-ref-good-first-contributions.rst:31 +#, fuzzy msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to" -" open a new issue with the baseline request template!" +"If you don't find the baseline you'd like to work on, be sure to open a " +"new issue with the baseline request template!" msgstr "그렇지 않으면 작업하고 싶은 baseline을 찾지 못하면 baseline 요청 템플릿으로 새 이슈를 열어야 합니다!" -#: ../../source/contributor-ref-good-first-contributions.rst:34 -msgid "Request for examples" -msgstr "예시 요청" +#: ../../source/contributor-ref-good-first-contributions.rst:35 +#, fuzzy +msgid "Usage examples" +msgstr "예시" -#: ../../source/contributor-ref-good-first-contributions.rst:36 +#: ../../source/contributor-ref-good-first-contributions.rst:37 +#, fuzzy msgid "" -"We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are" -" a few ideas where we'd be happy to accept a PR:" +"We wish we had more time to write usage examples because they help users " +"to get started with building what they want. If you notice any missing " +"examples that could help others, feel free to contribute!" 
msgstr "" "사용 예시는 사용자가 원하는 것을 구축하는 데 도움이 된다고 생각하기 때문에 더 많은 시간을 할애하여 작성할 수 있었으면 합니다. " "다음은 저희가 기꺼이 PR을 수락할 수 있는 몇 가지 아이디어입니다:" -#: ../../source/contributor-ref-good-first-contributions.rst:40 -msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -msgstr "Llama 2 미세 조정, Hugging Face Transformer와 파이토치 포함" - -#: ../../source/contributor-ref-good-first-contributions.rst:41 -msgid "XGBoost" -msgstr "XGBoost" - -#: ../../source/contributor-ref-good-first-contributions.rst:42 -msgid "Android ONNX on-device training" -msgstr "Android ONNX 온디바이스 훈련" - #: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 msgid "Secure Aggregation Protocols" msgstr "Secure Aggregation 프로토콜" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:6 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " -"protocol has not been implemented yet, so its diagram and abstraction may" -" not be accurate in practice. The SecAgg protocol can be considered as a " -"special case of the SecAgg+ protocol." +"While this term might be used in other places, here it refers to a series" +" of protocols, including ``SecAgg``, ``SecAgg+``, ``LightSecAgg``, " +"``FastSecAgg``, etc. This concept was first proposed by Bonawitz et al. " +"in `Practical Secure Aggregation for Federated Learning on User-Held Data" +" `_." msgstr "" -"SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. LightSecAgg 프로토콜은 아직 구현되지 않았기 " -"때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 " -"간주할 수 있습니다." 
- -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 -#, fuzzy -msgid "The ``SecAgg+`` abstraction" -msgstr "The :code:`SecAgg+` 추상화" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" -"In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have" -" keys of int type rather than ClientProxy type." +"Secure Aggregation protocols are used to securely aggregate model updates" +" from multiple clients while keeping the updates private. This is done by" +" encrypting the model updates before sending them to the server. The " +"server can decrypt only the aggregated model update without being able to" +" inspect individual updates." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:16 +msgid "" +"Flower now provides the ``SecAgg`` and ``SecAgg+`` protocols. While we " +"plan to implement more protocols in the future, one may also implement " +"their own custom secure aggregation protocol via low-level APIs." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:21 +msgid "The ``SecAgg+`` protocol in Flower" msgstr "" -"구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당되므로 사용되는 많은 파이썬 " -"dictionaries에는 ClientProxy 타입이 아닌 int 타입의 키가 있습니다." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:23 msgid "" -"The Flower server will execute and process received results in the " -"following order:" -msgstr "Flower 서버는 수신된 결과를 다음 순서로 실행하고 처리합니다:" +"The ``SecAgg+`` protocol is implemented using the ``SecAggPlusWorkflow`` " +"in the ``ServerApp`` and the ``secaggplus_mod`` in the ``ClientApp``. 
The" +" ``SecAgg`` protocol is a special case of the ``SecAgg+`` protocol, and " +"one may use ``SecAggWorkflow`` and ``secagg_mod`` for that." +msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 -#, fuzzy -msgid "The ``LightSecAgg`` abstraction" -msgstr "The :code:`LightSecAgg` 추상" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:28 +msgid "" +"You may find a detailed example in the `Secure Aggregation Example " +"`_. The " +"documentation for the ``SecAgg+`` protocol configuration is available at " +"`SecAggPlusWorkflow `_." +msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 -msgid "Types" -msgstr "타입" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:33 +msgid "" +"The logic of the ``SecAgg+`` protocol is illustrated in the following " +"sequence diagram: the dashed lines represent communication over the " +"network, and the solid lines represent communication within the same " +"process. The ``ServerApp`` is connected to ``SuperLink``, and the " +"``ClientApp`` is connected to the ``SuperNode``; thus, the communication " +"between the ``ServerApp`` and the ``ClientApp`` is done via the " +"``SuperLink`` and the ``SuperNode``." +msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:2 msgid "Contribute on GitHub" @@ -2083,7 +2100,6 @@ msgstr "" ":code:`baselines` 기여를 살펴봐야 합니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:357 -#: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "부록" @@ -2165,7 +2181,6 @@ msgid "Get started as a contributor" msgstr "기여자로 시작하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/docker/run-as-subprocess.rst:11 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 @@ -2439,18 +2454,11 @@ msgstr "보안 연결을 위한 SSL 사용 설정" #: ../../source/docker/enable-tls.rst:4 msgid "" "When operating in a production environment, it is strongly recommended to" -" enable Transport Layer Security (TLS) for each Flower Component to " +" enable Transport Layer Security (TLS) for each Flower component to " "ensure secure communication." msgstr "" -#: ../../source/docker/enable-tls.rst:7 -#, fuzzy -msgid "" -"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 PEM으로 인코딩된 인증서 체인이 필요합니다." - -#: ../../source/docker/enable-tls.rst:12 +#: ../../source/docker/enable-tls.rst:9 msgid "" "For testing purposes, you can generate your own self-signed certificates." " The `Enable SSL connections `__ 페이지에 프로세스를 안내하는 섹션이 있습니다." -#: ../../source/docker/enable-tls.rst:17 +#: ../../source/docker/enable-tls.rst:16 #, fuzzy msgid "" "Because Flower containers, by default, run with a non-root user ``app``, " @@ -2472,7 +2480,7 @@ msgstr "" "``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " "사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." 
-#: ../../source/docker/enable-tls.rst:20 +#: ../../source/docker/enable-tls.rst:19 #, fuzzy msgid "" "For example, to change the user ID of all files in the ``certificates/`` " @@ -2482,64 +2490,94 @@ msgstr "" "``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " "사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/docker/enable-tls.rst:23 -#: ../../source/docker/persist-superlink-state.rst:15 +#: ../../source/docker/enable-tls.rst:22 msgid "" "If you later want to delete the directory, you can change the user ID " "back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " -"state``." +"certificates``." msgstr "" -#: ../../source/docker/enable-tls.rst:27 +#: ../../source/docker/enable-tls.rst +msgid "Isolation Mode ``subprocess``" +msgstr "" + +#: ../../source/docker/enable-tls.rst:29 +msgid "" +"By default, the ServerApp is executed as a subprocess within the " +"SuperLink Docker container, and the ClientApp is run as a subprocess " +"within the SuperNode Docker container. You can learn more about the " +"different process modes here: :doc:`run-as-subprocess`." +msgstr "" + +#: ../../source/docker/enable-tls.rst:34 ../../source/docker/enable-tls.rst:119 +#, fuzzy +msgid "" +"To enable TLS between the SuperLink and SuperNode, as well as between the" +" SuperLink and the ``flwr`` CLI, you will need a PEM-encoded root " +"certificate, private key, and certificate chain." +msgstr "SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 PEM으로 인코딩된 인증서 체인이 필요합니다." 
+ +#: ../../source/docker/enable-tls.rst:37 #, fuzzy -msgid "SuperLink" +msgid "**SuperLink**" msgstr "flower 초연결" -#: ../../source/docker/enable-tls.rst:29 +#: ../../source/docker/enable-tls.rst:39 +#, fuzzy msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container:" +"Assuming all files we need are in the local ``superlink-certificates`` " +"directory, we can use the flag ``--volume`` to mount the local " +"directories into the SuperLink container:" msgstr "" +"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " +"때 ``--root-certificates`` 플래그를 사용하세요." #: ../../source/docker/enable-tls.rst msgid "Understanding the command" msgstr "" -#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 -#: ../../source/docker/enable-tls.rst:125 -#: ../../source/docker/tutorial-quickstart-docker.rst:66 -#: ../../source/docker/tutorial-quickstart-docker.rst:103 -#: ../../source/docker/tutorial-quickstart-docker.rst:217 -#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#: ../../source/docker/enable-tls.rst:54 ../../source/docker/enable-tls.rst:96 +#: ../../source/docker/enable-tls.rst:140 +#: ../../source/docker/enable-tls.rst:179 +#: ../../source/docker/enable-tls.rst:206 +#: ../../source/docker/enable-tls.rst:231 +#: ../../source/docker/tutorial-quickstart-docker.rst:68 +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +#: ../../source/docker/tutorial-quickstart-docker.rst:221 +#: ../../source/docker/tutorial-quickstart-docker.rst:303 #, fuzzy msgid "``docker run``: This tells Docker to run a container from an image." msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." 
-#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 -#: ../../source/docker/enable-tls.rst:126 -#: ../../source/docker/tutorial-quickstart-docker.rst:67 -#: ../../source/docker/tutorial-quickstart-docker.rst:104 -#: ../../source/docker/tutorial-quickstart-docker.rst:218 -#: ../../source/docker/tutorial-quickstart-docker.rst:306 +#: ../../source/docker/enable-tls.rst:55 ../../source/docker/enable-tls.rst:97 +#: ../../source/docker/enable-tls.rst:141 +#: ../../source/docker/enable-tls.rst:180 +#: ../../source/docker/enable-tls.rst:207 +#: ../../source/docker/enable-tls.rst:232 +#: ../../source/docker/tutorial-quickstart-docker.rst:69 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:222 +#: ../../source/docker/tutorial-quickstart-docker.rst:304 msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"``--volume ./certificates/:/app/certificates/:ro``: Mount the " -"``certificates`` directory in" +"``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the " +"``superlink-certificates``" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"the current working directory of the host machine as a read-only volume " -"at the" +"directory in the current working directory of the host machine as a read-" +"only volume" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "``/app/certificates`` directory inside the container." -msgstr "" +#, fuzzy +msgid "at the ``/app/certificates`` directory inside the container." +msgstr "VSCode Dev Container에서 개발" #: ../../source/docker/enable-tls.rst msgid "" @@ -2551,17 +2589,8 @@ msgstr "" msgid "directory." 
msgstr "" -#: ../../source/docker/enable-tls.rst -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " -"the image to be run and the specific" -msgstr "" - -#: ../../source/docker/enable-tls.rst -msgid "" -"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " -"represents a specific version of the image." +#: ../../source/docker/enable-tls.rst:62 +msgid "````: The name of your SuperLink image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2626,23 +2655,12 @@ msgstr "" msgid "the network." msgstr "" -#: ../../source/docker/enable-tls.rst:72 +#: ../../source/docker/enable-tls.rst:79 #, fuzzy -msgid "SuperNode" +msgid "**SuperNode**" msgstr "run\\_supernode" -#: ../../source/docker/enable-tls.rst:74 -#, fuzzy -msgid "" -"Assuming that the ``ca.crt`` certificate already exists locally, we can " -"use the flag ``--volume`` to mount the local certificate into the " -"container's ``/app/`` directory." -msgstr "" -"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " -"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " -"때 ``--root-certificates`` 플래그를 사용하세요." 
- -#: ../../source/docker/enable-tls.rst:79 +#: ../../source/docker/enable-tls.rst:83 ../../source/docker/enable-tls.rst:189 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2650,24 +2668,24 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt``" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"current working directory of the host machine as a read-only volume at " -"the ``/app/ca.crt``" +"file from the ``superlink-certificates`` directory of the host machine as" +" a read-only" msgstr "" #: ../../source/docker/enable-tls.rst #, fuzzy -msgid "directory inside the container." +msgid "volume at the ``/app/ca.crt`` directory inside the container." msgstr "VSCode Dev Container에서 개발" -#: ../../source/docker/enable-tls.rst -msgid "" -":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " -"the image to be run and the specific" +#: ../../source/docker/enable-tls.rst:101 +msgid "````: The name of your SuperNode image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2680,60 +2698,198 @@ msgstr "" msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -#: ../../source/docker/enable-tls.rst:105 -msgid "SuperExec" +#: ../../source/docker/enable-tls.rst +msgid "Isolation Mode ``process``" +msgstr "" + +#: ../../source/docker/enable-tls.rst:109 +msgid "" +"In isolation mode ``process``, the ServerApp and ClientApp run in their " +"own processes. Unlike in isolation mode ``subprocess``, the SuperLink or " +"SuperNode does not attempt to create the respective processes; instead, " +"they must be created externally." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:113 +msgid "" +"It is possible to run only the SuperLink in isolation mode ``subprocess``" +" and the SuperNode in isolation mode ``process``, or vice versa, or even " +"both with isolation mode ``process``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:117 +msgid "**SuperLink and ServerApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:122 +#, fuzzy +msgid "" +"Assuming all files we need are in the local ``superlink-certificates`` " +"directory, we can use the flag ``--volume`` to mount the local directory " +"into the SuperLink container:" +msgstr "" +"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " +"때 ``--root-certificates`` 플래그를 사용하세요." + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory in the current working directory of " +"the host" msgstr "" -#: ../../source/docker/enable-tls.rst:107 +#: ../../source/docker/enable-tls.rst msgid "" -"Assuming all files we need are in the local ``certificates`` directory " -"where the SuperExec will be executed from, we can use the flag " -"``--volume`` to mount the local directory into the ``/app/certificates/``" -" directory of the container:" +"machine as a read-only volume at the ``/app/certificates`` directory " +"inside the container." msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " "the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "SuperExec." +msgid "" +"tag of the image. 
The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-certfile certificates/server.pem``: Specify the location of the " -"SuperExec's" +"``--isolation process``: Tells the SuperLink that the ServerApp is " +"created by separate" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "independent process. The SuperLink does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:168 +#: ../../source/docker/tutorial-quickstart-docker.rst:207 +#, fuzzy +msgid "Start the ServerApp container:" +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" + +#: ../../source/docker/enable-tls.rst:181 +msgid "````: The name of your ServerApp image to be run." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"The ``certificates/server.pem`` file is used to identify the SuperExec " -"and to encrypt the" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-keyfile certificates/server.key``: Specify the location of the " -"SuperExec's" +"unencrypted communication. Secure connections will be added in future " +"releases." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:185 +msgid "**SuperNode and ClientApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:192 +#, fuzzy +msgid "Start the SuperNode container:" +msgstr "이미 *서버*를 시작할 수 있습니다:" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt`` file from the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory of the host machine as a read-only " +"volume at the ``/app/ca.crt``" msgstr "" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "VSCode Dev Container에서 개발" + #: ../../source/docker/enable-tls.rst msgid "" -"``--executor-config root-" -"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"location of the CA certificate file inside the container that the " -"SuperExec executor" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "should use to verify the SuperLink's identity." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:220 +#, fuzzy +msgid "Start the ClientApp container:" +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/docker/enable-tls.rst:233 +msgid "````: The name of your ClientApp image to be run." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:237 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:54 +#, fuzzy +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#: ../../source/docker/enable-tls.rst:239 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker.rst:330 +#, fuzzy +msgid "pyproject.toml" +msgstr "또는 ``pyproject.toml``:" + +#: ../../source/docker/enable-tls.rst:246 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "" +"The path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/enable-tls.rst:251 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/enable-tls.rst:254 +msgid "" +"In this example, ``local-deployment-tls`` has been used. Just remember to" +" replace ``local-deployment-tls`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" #: ../../source/docker/index.rst:2 @@ -2800,6 +2956,13 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." 
+msgstr "" + #: ../../source/docker/persist-superlink-state.rst:21 #, fuzzy msgid "" @@ -2921,48 +3084,135 @@ msgstr "SuperNode Dockerfile 만들기" #: ../../source/docker/run-as-subprocess.rst:2 #, fuzzy -msgid "Run ClientApp as a Subprocess" +msgid "Run ServerApp or ClientApp as a Subprocess" msgstr "린터 및 테스트 실행" #: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"In this mode, the ClientApp is executed as a subprocess within the " -"SuperNode Docker container, rather than running in a separate container. " -"This approach reduces the number of running containers, which can be " -"beneficial for environments with limited resources. However, it also " -"means that the ClientApp is no longer isolated from the SuperNode, which " -"may introduce additional security concerns." +"The SuperLink and SuperNode components support two distinct isolation " +"modes, allowing for flexible deployment and control:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:13 +#: ../../source/docker/run-as-subprocess.rst:7 msgid "" -"Before running the ClientApp as a subprocess, ensure that the FAB " -"dependencies have been installed in the SuperNode images. This can be " -"done by extending the SuperNode image:" +"Subprocess Mode: In this configuration (default), the SuperLink and " +"SuperNode take responsibility for launching the ServerApp and ClientApp " +"processes internally. This differs from the ``process`` isolation-mode " +"which uses separate containers, as demonstrated in the :doc:`tutorial-" +"quickstart-docker` guide." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:12 +msgid "" +"Using the ``subprocess`` approach reduces the number of running " +"containers, which can be beneficial for environments with limited " +"resources. However, it also means that the applications are not isolated " +"from their parent containers, which may introduce additional security " +"concerns." 
msgstr "" #: ../../source/docker/run-as-subprocess.rst:17 +msgid "" +"Process Mode: In this mode, the ServerApp and ClientApps run in " +"completely separate processes. Unlike the alternative Subprocess mode, " +"the SuperLink or SuperNode does not attempt to create or manage these " +"processes. Instead, they must be started externally." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:22 +msgid "" +"Both modes can be mixed for added flexibility. For instance, you can run " +"the SuperLink in ``subprocess`` mode while keeping the SuperNode in " +"``process`` mode, or vice versa." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:25 +msgid "" +"To run the SuperLink and SuperNode in isolation mode ``process``, refer " +"to the :doc:`tutorial-quickstart-docker` guide. To run them in " +"``subprocess`` mode, follow the instructions below." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:33 +#: ../../source/docker/run-as-subprocess.rst:74 #, fuzzy -msgid "Dockerfile.supernode" -msgstr "Flower SuperNode" +msgid "**Prerequisites**" +msgstr "전제 조건" + +#: ../../source/docker/run-as-subprocess.rst:35 +msgid "" +"1. Before running the ServerApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperLink images. This can be " +"done by extending the SuperLink image:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:38 +#, fuzzy +msgid "superlink.Dockerfile" +msgstr "SuperNode Dockerfile 만들기" -#: ../../source/docker/run-as-subprocess.rst:31 +#: ../../source/docker/run-as-subprocess.rst:52 #, fuzzy msgid "" -"Next, build the SuperNode Docker image by running the following command " -"in the directory where Dockerfile is located:" +"2. 
Next, build the SuperLink Docker image by running the following " +"command in the directory where Dockerfile is located:" msgstr "" "다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 SuperNode Docker " "이미지를 빌드합니다." -#: ../../source/docker/run-as-subprocess.rst:39 -msgid "Run the ClientApp as a Subprocess" +#: ../../source/docker/run-as-subprocess.rst:59 +#, fuzzy +msgid "**Run the ServerApp as a Subprocess**" +msgstr "린터 및 테스트 실행" + +#: ../../source/docker/run-as-subprocess.rst:61 +msgid "" +"Start the SuperLink and run the ServerApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "클라이언트앱" + +#: ../../source/docker/run-as-subprocess.rst:76 +msgid "" +"1. Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:80 +#, fuzzy +msgid "supernode.Dockerfile" +msgstr "SuperNode Dockerfile 만들기" + +#: ../../source/docker/run-as-subprocess.rst:94 +#, fuzzy +msgid "" +"2. Next, build the SuperNode Docker image by running the following " +"command in the directory where Dockerfile is located:" msgstr "" +"다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 SuperNode Docker " +"이미지를 빌드합니다." 
+ +#: ../../source/docker/run-as-subprocess.rst:101 +#, fuzzy +msgid "**Run the ClientApp as a Subprocess**" +msgstr "린터 및 테스트 실행" -#: ../../source/docker/run-as-subprocess.rst:41 +#: ../../source/docker/run-as-subprocess.rst:103 msgid "" -"Start the SuperNode with the flag ``--isolation subprocess``, which tells" -" the SuperNode to execute the ClientApp as a subprocess:" +"Start the SuperNode and run the ClientApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 @@ -3010,7 +3260,9 @@ msgstr "Docker 데몬이 실행 중인지 확인하십시오." #: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 -msgid "Docker Compose is `installed `_." +msgid "" +"Docker Compose V2 is `installed " +"`_." msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 @@ -3031,32 +3283,14 @@ msgid "" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:45 #, fuzzy -msgid "Build and start the services using the following command:" +msgid "" +"Export the version of Flower that your environment uses. 
Then, build and " +"start the services using the following command:" msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 -#, fuzzy -msgid "" -"Append the following lines to the end of the ``pyproject.toml`` file and " -"save it:" -msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 -#: ../../source/docker/tutorial-quickstart-docker.rst:324 -#, fuzzy -msgid "pyproject.toml" -msgstr "또는 ``pyproject.toml``:" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 -msgid "" -"You can customize the string that follows ``tool.flwr.federations.`` to " -"fit your needs. However, please note that the string cannot contain a dot" -" (``.``)." -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -3064,77 +3298,78 @@ msgid "" "command." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 -#, fuzzy -msgid "Run the example:" -msgstr "전체 코드 예제" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 -msgid "Follow the logs of the SuperExec service:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:72 +msgid "Run the example and follow the logs of the ``ServerApp`` :" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:78 msgid "" "That is all it takes! You can monitor the progress of the run through the" -" logs of the SuperExec." +" logs of the ``ServerApp``." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:91 msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:94 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 #, fuzzy msgid "Limitations" msgstr "동기" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 #, fuzzy msgid "Quickstart Example" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy msgid "quickstart-fastai" msgstr "빠른 시작 튜토리얼" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 -#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:123 -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/ref-changelog.md:236 ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:879 ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:1001 ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1132 msgid "None" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy msgid "quickstart-huggingface" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy msgid "quickstart-jax" msgstr "빠른 시작" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "빠른 시작" + #: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 msgid "" "The example has not yet been updated to work with the latest ``flwr`` " "version." 
@@ -3142,63 +3377,54 @@ msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy -msgid "quickstart-mlcube" -msgstr "빠른 시작" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 -#, fuzzy msgid "quickstart-mlx" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 msgid "" "`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 #, fuzzy msgid "quickstart-monai" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy msgid "quickstart-pandas" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy msgid "quickstart-pytorch" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 #, fuzzy msgid "quickstart-tabnet" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy msgid "quickstart-tensorflow" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 -msgid "Only runs on AMD64." -msgstr "" - #: ../../source/docker/set-environment-variables.rst:2 #, fuzzy msgid "Set Environment Variables" @@ -3228,8 +3454,8 @@ msgid "" "You will learn how to run the Flower client and server components on two " "separate machines, with Flower configured to use TLS encryption and " "persist SuperLink state across restarts. A server consists of a SuperLink" -" and ``SuperExec``. For more details about the Flower architecture, refer" -" to the :doc:`../explanation-flower-architecture` explainer page." +" and a ``ServerApp``. For more details about the Flower architecture, " +"refer to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 @@ -3284,132 +3510,142 @@ msgstr "" msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "Get the IP address from the remote machine and save it for later." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:47 msgid "" "Use the ``certs.yml`` Compose file to generate your own self-signed " "certificates. If you have certificates, you can continue with Step 2." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:212 msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:54 msgid "" "For production environments, you may have to use dedicated services to " "obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:57 msgid "" -"First, set the environment variables ``SUPERLINK_IP`` and " -"``SUPEREXEC_IP`` with the IP address from the remote machine. For " -"example, if the IP is ``192.168.2.33``, execute:" +"First, set the environment variable ``SUPERLINK_IP`` with the IP address " +"from the remote machine. 
For example, if the IP is ``192.168.2.33``, " +"execute:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:64 msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:71 msgid "Step 2: Copy the Server Compose Files" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:73 msgid "" "Use the method that works best for you to copy the ``server`` directory, " -"the certificates, and your Flower project to the remote machine." +"the certificates, and the ``pyproject.toml`` file of your Flower project " +"to the remote machine." msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:86 msgid "Step 3: Start the Flower Server Components" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:88 msgid "" "Log into the remote machine using ``ssh`` and run the following command " -"to start the SuperLink and SuperExec services:" +"to start the SuperLink and ``ServerApp`` services:" msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``server`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the server ``compose.yml`` file." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:107 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions. After exporting the ``PROJECT_DIR`` (after line " +"4), run the following commands:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:116 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:165 +msgid "" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:118 msgid "Go back to your terminal on your local machine." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:121 #, fuzzy msgid "Step 4: Start the Flower Client Components" msgstr "서버(SuperLink)" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:123 msgid "" "On your local machine, run the following command to start the client " "components:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:133 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``client`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the client ``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:137 #, fuzzy msgid "Step 5: Run Your Flower Project" msgstr "Flower SuperNode를 실행합니다." 
-#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"Specify the remote SuperExec IP addresses and the path to the root " -"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " -"the ``pyproject.toml`` file. Here, we have named our remote federation " -"``remote-superexec``:" +"Specify the remote SuperLink IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-deployment]`` table in" +" the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-deployment``:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:143 msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 -msgid "" -"The Path of the ``root-certificates`` should be relative to the location " -"of the ``pyproject.toml`` file." -msgstr "" - -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 -msgid "To run the project, execute:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:155 +msgid "Run the project and follow the ``ServerApp`` logs:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 msgid "" "That's it! With these steps, you've set up Flower on two separate " "machines and are ready to start using it." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:165 msgid "Step 6: Clean Up" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:167 #, fuzzy msgid "Shut down the Flower client components:" msgstr "Flower 클라이언트 앱을 실행합니다." 
-#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:174 msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" @@ -3431,16 +3667,16 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:33 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:39 +#: ../../source/docker/tutorial-quickstart-docker.rst:38 msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:45 +#: ../../source/docker/tutorial-quickstart-docker.rst:44 msgid "" "User-defined networks, such as ``flwr-network``, enable IP resolution of " "container names, a feature absent in the default bridge network. This " @@ -3448,52 +3684,55 @@ msgid "" "first." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#: ../../source/docker/tutorial-quickstart-docker.rst:49 #, fuzzy msgid "Step 2: Start the SuperLink" msgstr "서버(SuperLink)" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 -#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:64 +#: ../../source/docker/tutorial-quickstart-docker.rst:51 msgid "Open your terminal and run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "Understand the command" +msgid "" +"``-p 9091:9091 -p 9092:9092 -p 9093:9093``: Map port ``9091``, ``9092`` " +"and ``9093`` of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " -"container to the same port of" +"container to the same port of the host machine, allowing other services " +"to access the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "the host machine, allowing other services to access the Driver API on" +msgid "" +"ServerAppIO API on ``http://localhost:9091``, the Fleet API on " +"``http://localhost:9092`` and" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgid "the Exec API on ``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:71 -#: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:219 -#: ../../source/docker/tutorial-quickstart-docker.rst:309 +#: ../../source/docker/tutorial-quickstart-docker.rst:74 +#: ../../source/docker/tutorial-quickstart-docker.rst:114 +#: ../../source/docker/tutorial-quickstart-docker.rst:223 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:72 +#: ../../source/docker/tutorial-quickstart-docker.rst:75 msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:73 -#: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:220 -#: ../../source/docker/tutorial-quickstart-docker.rst:311 +#: ../../source/docker/tutorial-quickstart-docker.rst:76 +#: ../../source/docker/tutorial-quickstart-docker.rst:116 +#: ../../source/docker/tutorial-quickstart-docker.rst:225 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3515,15 +3754,26 @@ msgstr "" msgid "unencrypted communication." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:80 -msgid "Step 3: Start the SuperNode" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"independent process. The SuperLink does not attempt to create it. You can" +" learn more about" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the different process modes here: :doc:`run-as-subprocess`." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:82 +#: ../../source/docker/tutorial-quickstart-docker.rst:86 +#, fuzzy +msgid "Step 3: Start the SuperNodes" +msgstr "서버(SuperLink)" + +#: ../../source/docker/tutorial-quickstart-docker.rst:88 msgid "Start two SuperNode containers." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:84 +#: ../../source/docker/tutorial-quickstart-docker.rst:90 msgid "Start the first container:" msgstr "" @@ -3539,18 +3789,18 @@ msgstr "" msgid "``http://localhost:9094``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:109 +#: ../../source/docker/tutorial-quickstart-docker.rst:115 msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " -"to be run and the specific tag" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: This is the " +"name of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "of the image." +msgid "image to be run and the specific tag of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst @@ -3575,50 +3825,53 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--supernode-address 0.0.0.0:9094``: Set the address and port number " -"that the SuperNode" +"``--clientappio-api-address 0.0.0.0:9094``: Set the address and port " +"number that the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "is listening on." +msgid "SuperNode is listening on to communicate with the ClientApp. If" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--isolation process``: Tells the SuperNode that the ClientApp is " -"created by separate" +"two SuperNodes are started on the same machine, set two different port " +"numbers for each SuperNode." 
msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "independent process. The SuperNode does not attempt to create it." +msgid "" +"(E.g. In the next step, we set the second SuperNode container to listen " +"on port 9095)" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#: ../../source/docker/tutorial-quickstart-docker.rst:132 msgid "Start the second container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:142 -msgid "Step 4: Start the ClientApp" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#, fuzzy +msgid "Step 4: Start a ServerApp" +msgstr "서버(SuperLink)" -#: ../../source/docker/tutorial-quickstart-docker.rst:144 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 msgid "" -"The ClientApp Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own ClientApp image. In order to " +"The ServerApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ServerApp image. In order to " "install the FAB dependencies, you will need to create a Dockerfile that " -"extends the ClientApp image and installs the required dependencies." +"extends the ServerApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:149 +#: ../../source/docker/tutorial-quickstart-docker.rst:157 msgid "" -"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " -"the following code into it:" +"Create a ServerApp Dockerfile called ``serverapp.Dockerfile`` and paste " +"the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#: ../../source/docker/tutorial-quickstart-docker.rst:160 #, fuzzy -msgid "Dockerfile.clientapp" -msgstr "flower 클라이언트 앱" +msgid "serverapp.Dockerfile" +msgstr "SuperNode Dockerfile 만들기" #: ../../source/docker/tutorial-quickstart-docker.rst #, fuzzy @@ -3627,13 +3880,13 @@ msgstr "SuperNode Dockerfile 만들기" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/serverapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"to be built from is the ``flwr/clientapp image``, version :substitution-" +"to be built from is the ``flwr/serverapp`` image, version :substitution-" "code:`|stable_flwr_version|`." msgstr "" @@ -3692,7 +3945,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"``ENTRYPOINT [\"flwr-serverapp\"]``: Set the command ``flwr-serverapp`` " "to be" msgstr "" @@ -3700,7 +3953,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:186 +#: ../../source/docker/tutorial-quickstart-docker.rst:194 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3709,216 +3962,212 @@ msgid "" "after it has been copied into the Docker image (see line 5)." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:192 -#, fuzzy -msgid "" -"Next, build the ClientApp Docker image by running the following command " -"in the directory where the Dockerfile is located:" -msgstr "" -"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 ServerApp Docker 이미지를" -" 빌드합니다." - -#: ../../source/docker/tutorial-quickstart-docker.rst:201 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst:200 msgid "" -"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " -"Remember that these values are merely examples, and you can customize " -"them according to your requirements." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the ServerApp image:" msgstr "" -"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습니다. 여기서 선택한 값은 예시일 " -"뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:205 -msgid "Start the first ClientApp container:" +#: ../../source/docker/tutorial-quickstart-docker.rst:224 +msgid "``--name serverapp``: Assign the name ``serverapp`` to the container." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst #, fuzzy msgid "" -"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +"``flwr_serverapp:0.0.1``: This is the name of the image to be run and the" " specific tag" -msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." +msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" -" the address" +msgid "of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``supernode-1:9094``." 
+msgid "" +"``--serverappio-api-address superlink:9091``: Connect to the SuperLink's " +"ServerAppIO API" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:226 -msgid "Start the second ClientApp container:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "at the address ``superlink:9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 -msgid "Step 5: Start the SuperExec" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:234 +#, fuzzy +msgid "Step 5: Start the ClientApp" +msgstr "서버(SuperLink)" -#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#: ../../source/docker/tutorial-quickstart-docker.rst:236 #, fuzzy msgid "" -"The procedure for building and running a SuperExec image is almost " -"identical to the ClientApp image." +"The procedure for building and running a ClientApp image is almost " +"identical to the ServerApp image." msgstr "ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" -"Similar to the ClientApp image, you will need to create a Dockerfile that" -" extends the SuperExec image and installs the required FAB dependencies." +"Similar to the ServerApp image, you will need to create a Dockerfile that" +" extends the ClientApp image and installs the required FAB dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:245 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " -"the following code in:" +"Create a ClientApp Dockerfile called ``clientapp.Dockerfile`` and paste " +"the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:248 -msgid "Dockerfile.superexec" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:245 +#, fuzzy +msgid "clientapp.Dockerfile" +msgstr "클라이언트앱" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"to be built from is the ``flwr/superexec image``, version :substitution-" +"to be built from is the ``flwr/clientapp`` image, version :substitution-" "code:`|stable_flwr_version|`." -msgstr "" +msgstr "다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니다:" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" -"superexec`` to be" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." 
+"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:283 +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +#, fuzzy msgid "" -"Afterward, in the directory that holds the Dockerfile, execute this " -"Docker command to build the SuperExec image:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst:290 -msgid "Start the SuperExec container:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" +"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 ServerApp Docker 이미지를" +" 빌드합니다." -#: ../../source/docker/tutorial-quickstart-docker.rst +#: ../../source/docker/tutorial-quickstart-docker.rst:286 +#, fuzzy msgid "" -"the host machine, allowing you to access the SuperExec API on " -"``http://localhost:9093``." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" +"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습니다. 여기서 선택한 값은 예시일 " +"뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:310 -msgid "``--name superexec``: Assign the name ``superexec`` to the container." +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +msgid "Start the first ClientApp container:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst #, fuzzy msgid "" -"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" " specific tag" -msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." +msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." 
#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " -"SuperExec executor to" +"``--clientappio-api-address supernode-1:9094``: Connect to the " +"SuperNode's ClientAppIO" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "connect to the SuperLink running on port ``9091``." +msgid "API at the address ``supernode-1:9094``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:314 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:320 +#: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#: ../../source/docker/tutorial-quickstart-docker.rst:328 #, fuzzy msgid "Add the following lines to the ``pyproject.toml``:" msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/docker/tutorial-quickstart-docker.rst:331 -msgid "Run the ``quickstart-docker`` project by executing the command:" -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker.rst:337 -msgid "Follow the SuperExec logs to track the execution of the run:" +msgid "" +"Run the ``quickstart-docker`` project and follow the ServerApp logs to " +"track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:345 msgid "Step 7: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:346 +#: ../../source/docker/tutorial-quickstart-docker.rst:347 msgid "" "Change the application code. 
For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#: ../../source/docker/tutorial-quickstart-docker.rst:350 msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:356 +#: ../../source/docker/tutorial-quickstart-docker.rst:357 #, fuzzy -msgid "Stop the current ClientApp containers:" +msgid "Stop the current ServerApp and ClientApp containers:" msgstr "현재 클라이언트 속성입니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:361 +msgid "" +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:364 +msgid "If you haven’t made any changes, you can skip steps 2 through 4." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:370 #, fuzzy -msgid "Rebuild the FAB and ClientApp image:" +msgid "Rebuild ServerApp and ClientApp images:" msgstr "기본 이미지 빌드" -#: ../../source/docker/tutorial-quickstart-docker.rst:368 -msgid "Launch two new ClientApp containers based on the newly built image:" +#: ../../source/docker/tutorial-quickstart-docker.rst:377 +msgid "" +"Launch one new ServerApp and two new ClientApp containers based on the " +"newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:383 +#: ../../source/docker/tutorial-quickstart-docker.rst:402 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:390 +#: ../../source/docker/tutorial-quickstart-docker.rst:409 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:392 +#: ../../source/docker/tutorial-quickstart-docker.rst:411 msgid "Remove the containers and the bridge network:" msgstr "" -#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:408 -#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker.rst:423 #, fuzzy msgid "Where to Go Next" msgstr "시작 위치" -#: ../../source/docker/tutorial-quickstart-docker.rst:406 +#: ../../source/docker/tutorial-quickstart-docker.rst:425 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:407 +#: ../../source/docker/tutorial-quickstart-docker.rst:426 msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:427 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -3945,176 +4194,161 @@ msgstr "" msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:39 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:46 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " -"SuperExec and SuperNode images correctly." +"``ServerApp`` and ``ClientApp`` images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 #, fuzzy msgid "Step 2: Run Flower in Insecure Mode" msgstr "Flower SuperNode를 실행합니다." -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:53 msgid "" "To begin, start Flower with the most basic configuration. 
In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:61 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:175 #, fuzzy msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 -msgid "" -"``-f compose.yml``: Specify the YAML file that contains the basic Flower " -"service definitions." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:181 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:182 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:77 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 msgid "" -"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" -" the SuperExec addresses in the ``pyproject.toml`` file." +"To ensure the ``flwr`` CLI connects to the SuperLink, you need to specify" +" the SuperLink addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:85 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:225 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 -msgid "Execute the command to run the quickstart example:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 -msgid "Monitor the SuperExec logs and wait for the summary to appear:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:94 +msgid "" +"Run the quickstart example, monitor the ``ServerApp`` logs and wait for " +"the summary 
to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:102 msgid "Step 4: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 msgid "In the next step, change the application code." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:121 #, fuzzy msgid "Rebuild and restart the services." msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 -msgid "" -"If you have modified the dependencies listed in your ``pyproject.toml`` " -"file, it is essential to rebuild images." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:128 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:130 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:136 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 -msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the ``ServerApp`` logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:155 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:157 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:162 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 -msgid "" -"For more information, consult the following page: :doc:`persist-" -"superlink-state`." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:167 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +msgid "Run the command:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 -msgid "Run the command:" +msgid "" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst @@ -4134,17 +4368,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:238 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:197 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -4152,121 +4386,106 @@ msgid "" "if the containers are stopped and started again." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:205 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:207 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 -msgid "Step 7: Add another SuperNode" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 -msgid "" -"You can add more SuperNodes and ClientApps by duplicating their " -"definitions in the ``compose.yml`` file." -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:245 +#, fuzzy +msgid "Step 7: Add another SuperNode and ClientApp" +msgstr "서버(SuperLink)" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 msgid "" -"Just give each new SuperNode and ClientApp service a unique service name " -"like ``supernode-3``, ``clientapp-3``, etc." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 -msgid "In ``compose.yml``, add the following:" +"You can add more SuperNodes and ClientApps by uncommenting their " +"definitions in the ``compose.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:250 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 -msgid "" -"If you also want to enable TLS for the new SuperNodes, duplicate the " -"SuperNode definition for each new SuperNode service in the ``with-" -"tls.yml`` file." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:302 msgid "" -"Make sure that the names of the services match with the one in the " -"``compose.yml`` file." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 -msgid "In ``with-tls.yml``, add the following:" +"If you also want to enable TLS for the new SuperNode, uncomment the " +"definition in the ``with-tls.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:305 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:326 +#, fuzzy +msgid "Restart the services with:" +msgstr "이미 *서버*를 시작할 수 있습니다:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:335 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:350 -msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:340 +msgid "Comment out the lines 2-6 and uncomment the lines 7-13:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:342 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy msgid "Restart the services:" msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:376 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:378 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 #, fuzzy msgid "Remove all services and volumes:" msgstr "R에서 모든 항목을 제거합니다." 
-#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:402 #, fuzzy msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "빠른 시작 튜토리얼" @@ -4298,471 +4517,6 @@ msgstr "" "서버앱 야간 이미지와 페어링되어야 합니다. 버전이 동기화되도록 하려면 ``nightly`` 대신 " "``1.9.0.dev20240501``과 같은 구체적인 태그를 사용하는 것이 좋습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." -msgstr "" -"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " -"`_으로 기존 머신러닝 워크로드의 federated 버전을 구축하기 " -"위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 사용하여 CIFAR-10 데이터 세트에서 컨볼루션 " -"신경망(일괄 정규화 레이어 포함)을 훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " -"연합식으로 ` 와 비교했을 때 몇 가지 사항만 " -"변경 하면 됩니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "Centralized Training" -msgstr "중앙 집중식 훈련" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 -#, fuzzy -msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called ``cifar.py``, revised part is " -"shown below:" -msgstr "" -"모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. 
:code:`cifar.py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 " -"아래와 같습니다:" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 -msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." -msgstr "Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 -msgid "You can now run your machine learning workload:" -msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 -msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." -msgstr "" -"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 " -"FedBN 내에서 하나의 서버와 두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 -msgid "Federated Training" -msgstr "연합 훈련" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 -#, fuzzy -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only ``get_parameters`` and ``set_parameters`` function " -"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" -" PyTorch - From Centralized To Federated `. first." -msgstr "" -":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 :code:`client.py`의 " -":code:`get_parameters`와 :code:`set_parameters` 함수만 수정해야 합니다. 그렇지 않은 경우 " -":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 먼저 읽어보세요." 
- -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 -#, fuzzy -msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -"``server.py`` keeps unchanged, we can start the server directly." -msgstr "" -"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:`server.py`는 변경되지 않고 " -"그대로 유지되므로 서버를 바로 시작할 수 있습니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 -#, fuzzy -msgid "" -"Finally, we will revise our *client* logic by changing ``get_parameters``" -" and ``set_parameters`` in ``client.py``, we will exclude batch " -"normalization parameters from model parameter list when sending to or " -"receiving from the server." -msgstr "" -"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 " -":code:`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 서버에서 받을 때 모델" -" 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 -msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "" -"를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 집중된) PyTorch 프로젝트가 두 클라이언트에서" -" FedBN으로 연합 학습을 실행하는 것을 확인합니다. 축하합니다!" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 -#: ../../source/tutorial-quickstart-jax.rst:319 -msgid "Next Steps" -msgstr "다음 단계" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 -msgid "" -"The full source code for this example can be found `here " -"`_. 
Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" -msgstr "" -"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물론 이 예제는 두 " -"클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 " -"주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요?" -" 클라이언트를 더 추가하는 것은 어떨까요?" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." -msgstr "" -"이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. 여기서는 " -"PyTorch를 사용해 CIFAR-10 데이터 세트에서 컨볼루션 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " -"`_ " -"튜토리얼을 기반으로 centralized 학습 접근 방식을 사용하여 이 머신 러닝 작업을 소개합니다. 그런 다음 " -"centralized 훈련 코드를 기반으로 연합 방식 훈련을 실행합니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." -msgstr "" -"중앙 집중식 CNN 트레이닝 코드에 대한 간략한 설명부터 시작하겠습니다. 무슨 일이 일어나고 있는지 더 자세히 설명하려면 공식 " -"`PyTorch 튜토리얼 " -"`_을 " -"참조하세요." 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 -#, fuzzy -msgid "" -"Let's create a new file called ``cifar.py`` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as ``torch`` and ``torchvision``) need to be " -"imported. You can see that we do not import any package for federated " -"learning. You can keep all these imports as they are even when we add the" -" federated learning components at a later point." -msgstr "" -"CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 :code:`cifar.py`라는 새 파일을 " -"생성해 보겠습니다. 먼저, 필요한 모든 패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 " -"합니다. 연합 학습을 위한 패키지를 가져오지 않는 것을 확인 할 수 있습니. 나중에 연합 학습 구성 요소를 추가할 때에도 이러한 " -"모든 가져오기를 그대로 유지할 수 있습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 -#, fuzzy -msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in ``class Net()``." -msgstr "" -"이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. 모델 아키텍처(매우 간단한 컨볼루션 신경망)는" -" :code:`class Net()`에 정의되어 있습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 -#, fuzzy -msgid "" -"The ``load_data()`` function loads the CIFAR-10 training and test sets. " -"The ``transform`` normalized the data after loading." -msgstr "" -":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:`transform`은 " -"로드 후 데이터를 정규화합니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 -#, fuzzy -msgid "" -"We now need to define the training (function ``train()``) which loops " -"over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "" -"이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 배치에 대해 하나의 최적화 단계를 수행하는 " -"학습(함수 :code:`train()`)을 정의해야 합니다." 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 -#, fuzzy -msgid "" -"The evaluation of the model is defined in the function ``test()``. The " -"function loops over all test samples and measures the loss of the model " -"based on the test dataset." -msgstr "" -"모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘플을 반복하고 테스트 데이터 세트에 따라" -" 모델의 손실을 측정합니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." -msgstr "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 CIFAR-10에서 CNN을 훈련할 수 있습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." -msgstr "" -"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 구축한 것을 사용하여 하나의 " -"서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 -msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "" -"이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 단일 데이터 세트(CIFAR-10)로 모델을 학습시키는데, 이를 중앙 집중식 " -"학습이라고 부릅니다. 이전 섹션에서 설명한 중앙 집중식 학습의 개념은 대부분 알고 계실 것이며, 많은 분들이 이전에 사용해 보셨을 " -"것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 " -"합니다. 
이는 상당한 노력이 필요할 수 있습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으로 발전시킬 수 있습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 -#, fuzzy -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in ``cifar.py`` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." -msgstr "" -"개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 :code:`cifar.py`의 코드를 " -"사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 업데이트합니다. " -"업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 " -"프로세스의 한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 -#: ../../source/tutorial-quickstart-jax.rst:147 -#, fuzzy -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -"``server.py`` first. The *server* needs to import the Flower package " -"``flwr``. Next, we use the ``start_server`` function to start a server " -"and tell it to perform three rounds of federated learning." -msgstr "" -"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 :code:`server.py`를 설정해 보겠습니다. " -"*server*는 Flower 패키지 :code:`flwr`를 가져와야 합니다. 다음으로, :code:`start_server` " -"함수를 사용하여 서버를 시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 -#: ../../source/tutorial-quickstart-jax.rst:161 -msgid "We can already start the *server*:" -msgstr "이미 *서버*를 시작할 수 있습니다:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#, fuzzy -msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined centralized training in ``cifar.py``. Our " -"*client* needs to import ``flwr``, but also ``torch`` to update the " -"parameters on our PyTorch model:" -msgstr "" -"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`에서 이전에 정의한 " -"중앙 집중식 학습을 기반으로 구축합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 " -"업데이트하기 위해 :code:`torch`도 가져와야 합니다:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 -#, fuzzy -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " -"than ``Client`` if you use a framework with good NumPy interoperability " -"(like PyTorch or TensorFlow/Keras) because it avoids some of the " -"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " -"implement four methods, two methods for getting/setting model parameters," -" one method for training the model, and one method for testing the model:" -msgstr "" -"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " -":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 우리의 구현은 " -":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`CifarClient`라고 부를 " -"것입니다. :code:`NumPyClient`는 파이토치나 텐서플로우/Keras처럼 NumPy 상호운용성이 좋은 프레임워크를 " -"사용하는 경우 필요한 일부 보일러플레이트를 피하기 때문에 :code:`Client`보다 구현하기가 조금 더 쉽습니다. 
" -"code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 테스트를" -" 위한 메서드 1개 등 네 가지 메서드를 구현해야 합니다:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#, fuzzy -msgid "``set_parameters``" -msgstr ":code:`set_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 -#: ../../source/tutorial-quickstart-jax.rst:192 -msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#: ../../source/tutorial-quickstart-jax.rst:194 -#, fuzzy -msgid "" -"loop over the list of model parameters received as NumPy ``ndarray``'s " -"(think list of neural network layers)" -msgstr "(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파라미터 목록에 대해 반복합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#: ../../source/tutorial-quickstart-scikitlearn.rst:129 -#, fuzzy -msgid "``get_parameters``" -msgstr ":code:`get_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#, fuzzy -msgid "" -"get the model parameters and return them as a list of NumPy ``ndarray``'s" -" (which is what ``flwr.client.NumPyClient`` expects)" -msgstr "" -"모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 " -":code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -#, fuzzy -msgid "``fit``" -msgstr "``DISTRO``" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 -#: ../../source/tutorial-quickstart-jax.rst:200 -#: ../../source/tutorial-quickstart-jax.rst:205 -msgid "" -"update the parameters 
of the local model with the parameters received " -"from the server" -msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -msgid "train the model on the local training set" -msgstr "로컬 훈련 세트에서 모델을 훈련합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 -msgid "get the updated local model weights and return them to the server" -msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -#: ../../source/tutorial-quickstart-jax.rst:208 -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -#, fuzzy -msgid "``evaluate``" -msgstr ":code:`evaluate`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 -#: ../../source/tutorial-quickstart-jax.rst:207 -msgid "evaluate the updated model on the local test set" -msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -msgid "return the local loss and accuracy to the server" -msgstr "로컬 손실 및 정확도를 서버에 반환합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 -#, fuzzy -msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " -"So what we really do here is we tell Flower through our ``NumPyClient`` " -"subclass which of our already defined functions to call for training and " -"evaluation. We included type annotations to give you a better " -"understanding of the data types that get passed around." -msgstr "" -"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " -":code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합니다. 따라서 여기서" -" 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " -"Flower에 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습니다." 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 -#, fuzzy -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a ``CifarClient``, and starts this client. You load your " -"data and model by using ``cifar.py``. Start ``CifarClient`` with the " -"function ``fl.client.start_client()`` by pointing it at the same IP " -"address we used in ``server.py``:" -msgstr "" -"이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생성하고, 이 클라이언트를 시작하는 " -"작업만 남았습니다. 코드:`cifar.py`를 사용하여 데이터와 모델을 로드합니다. :code:`server.py`에서 사용한 것과" -" 동일한 IP 주소를 지정하여 :code:`fl.client.start_client()` 함수로 " -":code:`CifarClient`를 시작합니다:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 -#: ../../source/tutorial-quickstart-jax.rst:309 -msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "" -"를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) PyTorch 프로젝트가 두 클라이언트에서 연합 " -"학습을 실행하는 것을 확인합니다. 축하합니다!" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 -msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" -msgstr "" -"이 예제의 전체 소스 코드: `파이토치: 중앙 Centralized에서 Federated으로 (코드) " -"`_. 물론 이 예제는 두 클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 " -"다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 주제를 더 자세히 살펴볼 준비가 되셨습니다. 
각 클라이언트에서" -" 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것은 어떨까요?" - #: ../../source/explanation-differential-privacy.rst:2 #: ../../source/explanation-differential-privacy.rst:14 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 @@ -5032,7 +4786,7 @@ msgstr "고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:141 -#: ../../source/how-to-use-differential-privacy.rst:113 +#: ../../source/how-to-use-differential-privacy.rst:114 msgid "Local Differential Privacy" msgstr "로컬 차등 프라이버시" @@ -5113,7 +4867,6 @@ msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "[4] Galen 외. 조정형 클리핑을 통한 차등적 개인 학습." #: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated evaluation" msgstr "연합 평가" @@ -5144,11 +4897,11 @@ msgstr "" "모든 기본 제공 전략은 초기화 중에 평가 함수를 제공하여 중앙 집중식 평가를 지원합니다. 평가 함수는 현재 글로벌 모델 파라미터를 " "입력으로 받아 평가 결과를 반환할 수 있는 모든 함수입니다:" -#: ../../source/explanation-federated-evaluation.rst:61 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "Custom Strategies" msgstr "사용자 정의 전략" -#: ../../source/explanation-federated-evaluation.rst:63 +#: ../../source/explanation-federated-evaluation.rst:72 #, fuzzy msgid "" "The ``Strategy`` abstraction provides a method called ``evaluate`` that " @@ -5159,32 +4912,33 @@ msgstr "" "코드:`전략` 추상화는 현재 전역 모델 파라미터를 평가하는 데 직접 사용할 수 있는 :코드:`평가`라는 메서드를 제공합니다. 현재 " "서버 구현에서는 매개변수 집계 후와 연합 평가 전에 :code:`evaluate`를 호출합니다(다음 단락 참조)." 
-#: ../../source/explanation-federated-evaluation.rst:69 +#: ../../source/explanation-federated-evaluation.rst:78 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated Evaluation" msgstr "연합 평가" -#: ../../source/explanation-federated-evaluation.rst:72 +#: ../../source/explanation-federated-evaluation.rst:81 msgid "Implementing Federated Evaluation" msgstr "연합 평가 구현" -#: ../../source/explanation-federated-evaluation.rst:74 +#: ../../source/explanation-federated-evaluation.rst:83 #, fuzzy msgid "" "Client-side evaluation happens in the ``Client.evaluate`` method and can " "be configured from the server side." msgstr "클라이언트 측 평가는 :code:`Client.evaluate` 메서드에서 이루어지며 서버 측에서 구성할 수 있습니다." -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/explanation-federated-evaluation.rst:116 msgid "Configuring Federated Evaluation" msgstr "연합 평가 구성" -#: ../../source/explanation-federated-evaluation.rst:110 +#: ../../source/explanation-federated-evaluation.rst:118 msgid "" "Federated evaluation can be configured from the server side. Built-in " "strategies support the following arguments:" msgstr "연합 평가는 서버 측에서 구성할 수 있습니다. 기본 제공 전략은 다음 인수를 지원합니다:" -#: ../../source/explanation-federated-evaluation.rst:113 +#: ../../source/explanation-federated-evaluation.rst:121 #, fuzzy msgid "" "``fraction_evaluate``: a ``float`` defining the fraction of clients that " @@ -5198,7 +4952,7 @@ msgstr "" "있는 경우 :code:`10`이 평가를 위해 무작위로 선택됩니다. code:`fraction_evaluate`가 " ":code:`0.0`으로 설정된 경우 연합 평가가 비활성화됩니다." -#: ../../source/explanation-federated-evaluation.rst:118 +#: ../../source/explanation-federated-evaluation.rst:126 #, fuzzy msgid "" "``min_evaluate_clients``: an ``int``: the minimum number of clients to be" @@ -5211,7 +4965,7 @@ msgstr "" "20으로 설정되어 있으며 :code:`100` 클라이언트가 서버에 연결되어 있는 경우 :code:`20` 클라이언트가 평가를 위해 " "선택됩니다." 
-#: ../../source/explanation-federated-evaluation.rst:122 +#: ../../source/explanation-federated-evaluation.rst:130 #, fuzzy msgid "" "``min_available_clients``: an ``int`` that defines the minimum number of " @@ -5225,7 +4979,7 @@ msgstr "" ":code:`min_available_clients`보다 적으면 서버는 더 많은 클라이언트가 연결될 때까지 기다렸다가 평가를 위한 " "클라이언트 샘플링을 계속합니다." -#: ../../source/explanation-federated-evaluation.rst:127 +#: ../../source/explanation-federated-evaluation.rst:135 #, fuzzy msgid "" "``on_evaluate_config_fn``: a function that returns a configuration " @@ -5238,11 +4992,11 @@ msgstr "" "단계 중에 호출되며, 서버 측에서 클라이언트 측 평가를 사용자 지정하는 편리한 방법을 제공합니다(예: 수행되는 유효성 검사 단계 수" " 구성)." -#: ../../source/explanation-federated-evaluation.rst:157 +#: ../../source/explanation-federated-evaluation.rst:177 msgid "Evaluating Local Model Updates During Training" msgstr "훈련 중 로컬 모델 업데이트 평가" -#: ../../source/explanation-federated-evaluation.rst:159 +#: ../../source/explanation-federated-evaluation.rst:179 #, fuzzy msgid "" "Model parameters can also be evaluated during training. ``Client.fit`` " @@ -5251,16 +5005,18 @@ msgstr "" "모델 파라미터는 훈련 중에도 평가할 수 있습니다. :code:`Client.fit`은 임의의 평가 결과를 dictionary로 " "반환할 수 있습니다:" -#: ../../source/explanation-federated-evaluation.rst:201 +#: ../../source/explanation-federated-evaluation.rst:220 msgid "Full Code Example" msgstr "전체 코드 예제" -#: ../../source/explanation-federated-evaluation.rst:203 +#: ../../source/explanation-federated-evaluation.rst:222 +#, fuzzy msgid "" "For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"evaluation, see the `Advanced TensorFlow Example " +"`_" +" (the same approach can be applied to workloads implemented in any other " +"framework)." 
msgstr "" "연합 평가와 중앙 집중식 평가를 모두 사용하는 전체 코드 예제는 *고급 텐서플로우 예제*(다른 프레임워크에서 구현된 워크로드에도 " "동일한 접근 방식을 적용할 수 있음)를 참조하세요: " @@ -5466,40 +5222,13 @@ msgid "" "a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/explanation-flower-architecture.rst:121 -msgid "" -"To help you start and manage all of the concurrently executing training " -"runs, Flower offers one additional long-running server-side service " -"called **SuperExec**. When you type ``flwr run`` to start a new training " -"run, the ``flwr`` CLI bundles your local project (mainly your " -"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " -"**SuperExec** will then take care of starting and managing your " -"``ServerApp``, which in turn selects SuperNodes to execute your " -"``ClientApp``." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:128 -msgid "" -"This architecture allows many users to (concurrently) run their projects " -"on the same federation, simply by typing ``flwr run`` on their local " -"developer machine." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "Flower Deployment Engine with SuperExec" -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "The SuperExec service for managing concurrent training runs in Flower." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:141 +#: ../../source/explanation-flower-architecture.rst:123 msgid "" "This explanation covers the Flower Deployment Engine. An explanation " "covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/explanation-flower-architecture.rst:146 +#: ../../source/explanation-flower-architecture.rst:128 #, fuzzy msgid "" "As we continue to enhance Flower at a rapid pace, we'll periodically " @@ -5508,560 +5237,61 @@ msgstr "" "Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " "언제든지 공유해 주세요!" 
-#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "FED 템플릿" - -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "목차" - -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[목차](#목차)" - -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[요약](#요약)" - -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[동기](#동기)" - -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[목표](#목표)" - -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[비목표](#비목표)" - -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[제안](#제안)" - -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[단점](#단점)" - -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[고려되는 대안](#고려되는 대안)" - -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[부록](#부록)" - -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr 
"요약" - -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 문장 1: 문제 요약\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "동기" - -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "목표" - -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "목표가 아닌 것" - -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "제안" - -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "단점" - -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid 
"Alternatives Considered" -msgstr "고려되는 대안" - -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[대안 1\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[대안 2\\]" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower Enhancement Doc" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Metadata](#metadata)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[워크플로우](#워크플로우)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "변화의 동기가 분명한지 확인합니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" - -#: 
../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "종합 평가 결과" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "" -"커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 성공적으로 완료할 수 있도록 하는 동시에 이해 관계자가 프로세스 전반에 걸쳐 " -"적절히 대표되도록 보장합니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "기능 및 effort-tracking 문서" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "제품 요구 사항 문서" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "디자인 문서" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 집계를 완전히 사용자 지정할 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 -msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." 
+#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "사용자 지정 평가 결과 집계" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" -"Flower에 제안된 변경 사항이나 기능을 멀리 가져오는 경우, 프로젝트의 향후 변경 사항을 이해하고 전달하기 위해 단일 " -"GitHub 이슈 또는 pull request를 넘어서는 abstraction이 필요합니다." +"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오는 사용자 지정 평가 결과를 집계할 수 " +"있습니다. 클라이언트는 dictionary를 반환하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/how-to-aggregate-evaluation-results.rst:38 msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." -msgstr "" -"이 프로세스의 목적은 커뮤니티 내 '부족한 지식'의 양을 줄이는 것입니다. 이 프로세스는 Slack 스레드, 영상 통화, 복도 " -"대화에서 나온 의사 결정을 잘 추적된 아티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을 향상시키는 것을 목표로 합니다." +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하는 메트릭을 집계할 수 있습니다:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. 
If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "" -"대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 개선 프로세스를 따라야 합니다. 개선 사항을 작성자나 개발자 이외의 다른 " -"사람에게 서면 또는 구두로 설명해야 하는 경우에는 개선 문서 작성을 고려하세요." +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" +msgstr "SuperNodes 인증하기" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -"마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 기술적 노력(리팩토링, 주요 아키텍처 변경)도 널리 알려야 합니다. 개선 " -"프로세스는 일반 사용자나 운영자에게 전혀 영향을 미치지 않더라도 이를 위해 적합합니다." +"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있는 인증된 SuperNodes에" +" 대한 기본 지원을 제공합니다. Flower 노드 인증은 GitHub SSH 인증 방식과 유사하게 작동합니다:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 -msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." -msgstr "" -"작은 변경 및 추가의 경우, 개선 프로세스를 거치는 것은 시간이 많이 걸리고 불필요합니다. 예를 들어, 새로운 연합 학습 알고리즘을" -" 추가하는 것은 Flower의 작동 방식이나 사용 방식을 변경하지 않고 기능만 추가하는 것이기 때문입니다." 
+#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." -msgstr "기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지하는 것이므로 기능 요청과는 다릅니다." +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 -msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." -msgstr "" -"개선 사항은 정의된 템플릿과 참조용으로 Enhancement Doc.를 검토하고 저장하는 워크플로우를 따르는 Markdown 파일에" -" 캡처됩니다." 
- -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Enhancement Doc 템플릿" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 -msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Title (metadata와 같게)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table of Contents (필요시)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Constraints/Caveats (선택 사항)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Design Details (선택 사항)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "졸업 기준" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "참고로 이 문서는 위의 구조를 따릅니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Metadata" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "" -"**피드 번호** (필수) 마지막 Flower Enhancement 문서의 `피드 번호` + 1. 
이 번호를 사용하면 다른 제안을 " -"쉽게 참조할 수 있습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 -msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)를 참조하세요." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 -msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." -msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." -msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." -msgstr "**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." -msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." 
- -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "워크플로우" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." -msgstr "" -"개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나 제안된 적이 있어야 합니다. 따라서 개선 사항을 주도하는 사(보통 " -"작성자)이 필요합니다. 이 사람은 또한 제안을 검토할 의향이 있는 Flower 커미터를 찾아야 합니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." -msgstr "" -"새 개선 사항은 `NNNN-YYYYMMDD-enhancement-title.md` 형식의 파일 이름으로 체크인되며, `NNNN`은 " -"Flower 개선 문서 번호이고 `enhancements`에 해당합니다. 모든 개선 사항은 pull request의 일부로 `잠정`" -" 상태에서 시작됩니다. 토론은 pull request 검토의 일부로 이루어집니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." -msgstr "" -"개선 사항이 검토 및 승인되면 상태가 '구현 가능'으로 변경됩니다. 그런 다음 실제 구현은 별도의 pull requests를 통해 " -"이루어집니다. 이러한 pull requests는 설명의 일부로 해당 개선 사항을 언급해야 합니다. 구현이 완료되면 제안 상태는 " -"'구현됨'으로 변경됩니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. 
An Enhancement has " -"the following states:" -msgstr "특정 조건에서는 다른 상태도 가능합니다. 개선에는 다음과 같은 상태가 있습니다:" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 -msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." -msgstr "'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다. 제안이 구체화되고 활발하게 정의 및 논의되는 동안의 시작 단계입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." -msgstr "`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." 
-msgstr "" -"GitHub에서 이미 제공하는 프로세스(이슈 및 Pull Requests)에 추가 프로세스를 추가하면 더 복잡해지고 잠재적인 처음인" -" 기여자에게는 장벽이 될 수 있습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "" -"현재 기능 이슈 템플릿에서 요구되는 한 문장 설명 이상으로 제안서 템플릿을 확장하는 것은 영어가 모국어가 아닌 사용자에게는 큰 " -"부담이 될 수 있습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 이슈" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." -msgstr "" -"이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 가능합니다. 예를 들어 태그를 사용하여 다른 이슈와 구별하고 필터링할 수 " -"있습니다. 주요 이슈는 개선 사항에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는 댓글 스레드가 하나만 있습니다. 개선 " -"사항에는 일반적으로 문서의 여러 부분에 대해 동시에 여러 개의 토론 스레드가 있습니다. GitHub 이슈를 사용할 때 이러한 여러 " -"토론을 관리하면 혼란스러울 수 있습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google 문서 도구" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." 
-msgstr "" -"Google 문서는 여러 스레드의 토론을 허용합니다. 하지만 Google 문서는 프로젝트 외부에서 호스팅되므로 커뮤니티에서 검색할 " -"수 있도록 관리해야 합니다. 모든 제안에 대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. Flower 저장소의 일부로 " -"제안서를 보낼 때와 비교하면 링크가 누락될 가능성이 훨씬 더 높습니다." - -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 개선 문서" - -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "종합 평가 결과" - -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." -msgstr "Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 집계를 완전히 사용자 지정할 수 있습니다." - -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "사용자 지정 평가 결과 집계" - -#: ../../source/how-to-aggregate-evaluation-results.rst:10 -#, fuzzy -msgid "" -"The same ``Strategy``-customization approach can be used to aggregate " -"custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" -msgstr "" -"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오는 사용자 지정 평가 결과를 집계할 수 " -"있습니다. 클라이언트는 dictionary를 반환하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" - -#: ../../source/how-to-aggregate-evaluation-results.rst:39 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" -msgstr "그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하는 메트릭을 집계할 수 있습니다:" - -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" -msgstr "SuperNodes 인증하기" - -#: ../../source/how-to-authenticate-supernodes.rst:4 -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. 
" -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" -msgstr "" -"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있는 인증된 SuperNodes에" -" 대한 기본 지원을 제공합니다. Flower 노드 인증은 GitHub SSH 인증 방식과 유사하게 작동합니다:" - -#: ../../source/how-to-authenticate-supernodes.rst:8 -msgid "SuperLink (server) stores a list of known (client) node public keys" -msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" - -#: ../../source/how-to-authenticate-supernodes.rst:9 -msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" -msgstr "SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" - -#: ../../source/how-to-authenticate-supernodes.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" "Shared secret is used to compute the HMAC value of the message sent from " "SuperNode to SuperLink as a token" @@ -6215,7 +5445,7 @@ msgstr "" "포함되므로 모든 통신이 신뢰할 수 있는 통신 방법을 사용하여 안전한 방식으로 이루어지도록 하세요." #: ../../source/how-to-authenticate-supernodes.rst:100 -#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-enable-tls-connections.rst:108 #: ../../source/how-to-use-built-in-mods.rst:95 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" @@ -6234,18 +5464,17 @@ msgstr "" " 알고 안전하게 보관해야 합니다." #: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" +#, fuzzy +msgid "Configure Clients" msgstr "클라이언트 구성" #: ../../source/how-to-configure-clients.rst:4 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"Flower provides the ability to send configuration values to clients, " +"allowing server-side control over client behavior. 
This feature enables " +"flexible and dynamic adjustment of client-side hyperparameters, improving" +" collaboration and experimentation." msgstr "" -"모델 파라미터와 함께 Flower는 설정 값을 클라이언트에 전송할 수 있습니다. 구성 값은 다양한 용도로 사용할 수 있습니다. 예를" -" 들어 서버에서 클라이언트 측 하이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다." #: ../../source/how-to-configure-clients.rst:9 msgid "Configuration values" @@ -6253,261 +5482,419 @@ msgstr "구성 값" #: ../../source/how-to-configure-clients.rst:11 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These values must be of type ``Scalar``, which includes " +"``bool``, ``bytes``, ``float``, ``int``, or ``str`` (or equivalent types " +"in different languages). Scalar is the value type directly supported by " +"Flower for these configurations." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:17 +msgid "For example, a ``FitConfig`` dictionary might look like this:" msgstr "" -"구성 값은 ``str`` 키와 ``bool``, ``bytes``, ``double``(64비트 정밀도 정수), ``int`` 또는" -" ``str``(또는 다른 언어의 동등한 유형) 유형의 값으로 구성된 사전으로 표현됩니다. 다음은 Python의 구성 사전 " -"예제입니다:" -#: ../../source/how-to-configure-clients.rst:25 +#: ../../source/how-to-configure-clients.rst:28 +#, fuzzy msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " +"Flower serializes these configuration dictionaries (or *config dicts* for" +" short) to their ProtoBuf representation, transports them to the client " "using gRPC, and then deserializes them back to Python dictionaries." 
msgstr "" "Flower는 이러한 구성 dictionaries(또는 줄여서 *config dict*)를 ProtoBuf 표현으로 직렬화하고, " "gRPC를 사용하여 클라이언트로 전송한 다음 다시 Python dictionaries로 역직렬화합니다." -#: ../../source/how-to-configure-clients.rst:31 +#: ../../source/how-to-configure-clients.rst:34 +#, fuzzy msgid "" "Currently, there is no support for directly sending collection types " "(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"dictionaries. To send collections, convert them to a supported type " +"(e.g., JSON string) and decode on the client side." msgstr "" "현재 구성 사전에서 컬렉션 유형(예: ``Set``, ``List``, ``Map``)을 값으로 직접 전송하는 기능은 지원되지 " "않습니다. 컬렉션을 지원되는 값 유형 중 하나로 변환한 다음 클라이언트 측에서 다시 변환하여 값으로 보내는 몇 가지 해결 방법이 " "있습니다." -#: ../../source/how-to-configure-clients.rst:36 +#: ../../source/how-to-configure-clients.rst:38 +#, fuzzy +msgid "Example:" +msgstr "예시" + +#: ../../source/how-to-configure-clients.rst:51 +#, fuzzy +msgid "Configuration through Built-in Strategies" +msgstr "기본 제공 전략을 통한 구성" + +#: ../../source/how-to-configure-clients.rst:53 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"Flower provides configuration options to control client behavior " +"dynamically through ``FitConfig`` and ``EvaluateConfig``. These " +"configurations allow server-side control over client-side parameters such" +" as batch size, number of local epochs, learning rate, and evaluation " +"settings, improving collaboration and experimentation." msgstr "" -"예를 들어 부동 소수점 숫자 목록을 JSON 문자열로 변환한 다음 구성 dictionary을 사용하여 JSON 문자열을 전송한 다음" -" 클라이언트에서 다시 부동 소수점 숫자 목록으로 변환할 수 있습니다." 
-#: ../../source/how-to-configure-clients.rst:41 -msgid "Configuration through built-in strategies" -msgstr "기본 제공 전략을 통한 구성" +#: ../../source/how-to-configure-clients.rst:59 +#, fuzzy +msgid "``FitConfig`` and ``EvaluateConfig``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-configure-clients.rst:61 +msgid "" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These dictionaries enable the server to adjust client-" +"side hyperparameters and monitor progress effectively." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:67 +#, fuzzy +msgid "``FitConfig``" +msgstr "``DISTRO``" + +#: ../../source/how-to-configure-clients.rst:69 +msgid "" +"``FitConfig`` specifies the hyperparameters for training rounds, such as " +"the batch size, number of local epochs, and other parameters that " +"influence training." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:72 +msgid "For example, a ``fit_config`` callback might look like this:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:90 +msgid "" +"You can then pass this ``fit_config`` callback to a built-in strategy " +"such as ``FedAvg``:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:101 +msgid "" +"On the client side, the configuration is received in the ``fit`` method, " +"where it can be read and used:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:43 +#: ../../source/how-to-configure-clients.rst:124 #, fuzzy +msgid "``EvaluateConfig``" +msgstr ":code:`evaluate`" + +#: ../../source/how-to-configure-clients.rst:126 +msgid "" +"``EvaluateConfig`` specifies hyperparameters for the evaluation process, " +"such as the batch size, evaluation frequency, or metrics to compute " +"during evaluation." 
+msgstr "" + +#: ../../source/how-to-configure-clients.rst:129 +msgid "For example, an ``evaluate_config`` callback might look like this:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:143 +msgid "" +"You can pass this ``evaluate_config`` callback to a built-in strategy " +"like ``FedAvg``:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:151 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like ``FedAvg``. Built-in strategies support so-called " -"configuration functions. A configuration function is a function that the " -"built-in strategy calls to get the configuration dictionary for the " -"current round. It then forwards the configuration dictionary to all the " -"clients selected during that round." +"On the client side, the configuration is received in the ``evaluate`` " +"method, where it can be used during the evaluation process:" msgstr "" -"클라이언트에 구성 값을 보내는 가장 쉬운 방법은 :code:`FedAvg`와 같은 기본 제공 전략을 사용하는 것입니다. 기본 제공 " -"전략은 소위 구성 함수를 지원합니다. 구성 함수는 내장 전략이 현재 단계의 구성 사전을 가져오기 위해 호출하는 함수입니다. 그런 " -"다음 해당 단계 동안 선택된 모든 클라이언트에 구성 사전을 전달합니다." -#: ../../source/how-to-configure-clients.rst:49 +#: ../../source/how-to-configure-clients.rst:175 +msgid "Example: Sending Training Configurations" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:177 +#, fuzzy msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"Imagine we want to send (a) the batch size, (b) the current global round," +" and (c) the number of local epochs. Our configuration function could " +"look like this:" msgstr "" "간단한 예부터 시작하겠습니다. (a) 클라이언트가 사용해야 하는 배치 크기, (b) 현재 글로벌 연합 라운드, (c) 클라이언트 " "측에서 학습할 에포크 수를 전송하고 싶다고 가정해 보겠습니다. 
구성 함수는 다음과 같습니다:" -#: ../../source/how-to-configure-clients.rst:65 +#: ../../source/how-to-configure-clients.rst:190 +msgid "" +"To use this function with a built-in strategy like ``FedAvg``, pass it to" +" the ``FedAvg`` constructor (typically in your ``server_fn``):" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:211 #, fuzzy +msgid "Client-Side Configuration" +msgstr "클라이언트 측 클리핑" + +#: ../../source/how-to-configure-clients.rst:213 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -"``on_fit_config_fn``:" +"On the client side, configurations are received as input to the ``fit`` " +"and ``evaluate`` methods. For example:" msgstr "" -"기본 제공 전략이 이 함수를 사용하도록 하려면 초기화 중에 매개 변수 :code:`on_fit_config_fn`을 사용하여 " -"``FedAvg``에 이 함수를 전달하면 됩니다:" -#: ../../source/how-to-configure-clients.rst:75 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" +#: ../../source/how-to-configure-clients.rst:230 +msgid "Dynamic Configurations per Round" +msgstr "" -#: ../../source/how-to-configure-clients.rst:86 +#: ../../source/how-to-configure-clients.rst:232 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"Configuration functions are called at the beginning of every round. This " +"allows for dynamic adjustments based on progress. For example, you can " +"increase the number of local epochs in later rounds:" msgstr "" -"평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 방식으로 작동합니다. 다른 배치 크기를 사용하기 위해 " -"다른 구성 값을 `evaluate`로 보내려고 할 수 있기 때문에 이 함수는 별도의 함수입니다." 
-#: ../../source/how-to-configure-clients.rst:90 +#: ../../source/how-to-configure-clients.rst:247 +msgid "Customizing Client Configurations" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:249 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"In some cases, it may be necessary to send different configurations to " +"individual clients. To achieve this, you can create a custom strategy by " +"extending a built-in one, such as ``FedAvg``:" msgstr "" -"기본 제공 전략은 매 라운드마다 이 함수를 호출합니다(즉, `Strategy.configure_fit` 또는 " -"`Strategy.configure_evaluate`가 실행될 때마다). 매 라운드마다 `on_evaluate_config_fn`을" -" 호출하면 연속된 라운드에서 config dict를 변경/변경할 수 있습니다. 예를 들어 이후 라운드에서 로컬 에포크 수를 늘리기 " -"위해 하이퍼파라미터 일정을 구현하려면 다음과 같이 할 수 있습니다:" -#: ../../source/how-to-configure-clients.rst:107 -#, fuzzy -msgid "The ``FedAvg`` strategy will call this function *every round*." -msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다." +#: ../../source/how-to-configure-clients.rst:254 +msgid "Example: Client-Specific Configuration" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:273 +msgid "Next, use this custom strategy as usual:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:287 +msgid "Summary of Enhancements" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:289 +msgid "**Dynamic Configurations**: Enables per-round adjustments via functions." +msgstr "" -#: ../../source/how-to-configure-clients.rst:110 -msgid "Configuring individual clients" -msgstr "개별 클라이언트 구성" +#: ../../source/how-to-configure-clients.rst:290 +msgid "**Advanced Customization**: Supports client-specific strategies." 
+msgstr "" -#: ../../source/how-to-configure-clients.rst:112 +#: ../../source/how-to-configure-clients.rst:291 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." -msgstr "경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다." +"**Client-Side Integration**: Configurations accessible in ``fit`` and " +"``evaluate``." +msgstr "" -#: ../../source/how-to-configure-clients.rst:115 +#: ../../source/how-to-design-stateful-clients.rst:2 #, fuzzy +msgid "Design stateful ClientApps" +msgstr "클라이언트앱" + +#: ../../source/how-to-design-stateful-clients.rst:20 +msgid "" +"By design, ClientApp_ objects are stateless. This means that the " +"``ClientApp`` object is recreated each time a new ``Message`` is to be " +"processed. This behaviour is identical with Flower's Simulation Engine " +"and Deployment Engine. For the former, it allows us to simulate the " +"running of a large number of nodes on a single machine or across multiple" +" machines. For the latter, it enables each ``SuperNode`` to be part of " +"multiple runs, each running a different ``ClientApp``." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:27 +msgid "" +"When a ``ClientApp`` is executed it receives a Context_. This context is " +"unique for each ``ClientApp``, meaning that subsequent executions of the " +"same ``ClientApp`` from the same node will receive the same ``Context`` " +"object. In the ``Context``, the ``.state`` attribute can be used to store" +" information that you would like the ``ClientApp`` to have access to for " +"the duration of the run. This could be anything from intermediate results" +" such as the history of training losses (e.g. as a list of `float` values" +" with a new entry appended each time the ``ClientApp`` is executed), " +"certain parts of the model that should persist at the client side, or " +"some other arbitrary Python objects. These items would need to be " +"serialized before saving them into the context." 
+msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:38 +msgid "Saving metrics to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:40 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " -"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " -"the config dict of a *single client* (only the first client in the list, " -"the other clients in this round to not receive this \"special\" config " -"value):" +"This section will demonstrate how to save metrics such as accuracy/loss " +"values to the Context_ so they can be used in subsequent executions of " +"the ``ClientApp``. If your ``ClientApp`` makes use of NumPyClient_ then " +"entire object is also re-created for each call to methods like ``fit()`` " +"or ``evaluate()``." msgstr "" -"이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from scratch " -"`를 통해 수행할 수 있습니다. 다음은 사용자 지정 ``\"hello\"'를 " -"추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예입니다: \"world\"`` 구성 키/값 쌍을 *단일 " -"클라이언트*의 config dict에 추가합니다(목록의 첫 번째 클라이언트만, 이 라운드의 다른 클라이언트는 이 \"특별한\" 구성" -" 값을 수신하지 않음):" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "로깅 구성" +#: ../../source/how-to-design-stateful-clients.rst:45 +msgid "" +"Let's begin with a simple setting in which ``ClientApp`` is defined as " +"follows. The ``evaluate()`` method only generates a random number and " +"prints it." +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/how-to-design-stateful-clients.rst:50 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +"You can create a PyTorch project with ready-to-use ``ClientApp`` and " +"other components by running ``flwr new``." 
msgstr "" -"Flower 로거는 federated 학습 워크로드에서 발생하는 모든 핵심 이벤트를 추적합니다. 기본적으로 표준 메시지 형식에 따라" -" 정보를 표시합니다:" -#: ../../source/how-to-configure-logging.rst:13 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:81 msgid "" -"containing relevant information including: log message level (e.g. " -"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " -"from, as well as the log message itself. In this way, the logger would " -"typically display information on your terminal as follows:" +"Let's say we want to save that randomly generated integer and append it " +"to a list that persists in the context. To do that, you'll need to do two" +" key things:" msgstr "" -"로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 줄, 로그 메시지 자체 등 " -"관련 정보를 포함합니다. 이러한 방식으로 로거는 일반적으로 다음과 같은 정보를 터미널에 표시합니다:" -#: ../../source/how-to-configure-logging.rst:35 -msgid "Saving log to file" -msgstr "파일에 로그 저장" +#: ../../source/how-to-design-stateful-clients.rst:84 +msgid "Make the ``context.state`` reachable withing your client class" +msgstr "" -#: ../../source/how-to-configure-logging.rst:37 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:85 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do ``fl.server.start_server``) and when using " -"the ``VirtualClientEngine`` (i.e. when you do " -"``fl.simulation.start_simulation``). In some situations you might want to" -" save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"Initialise the appropiate record type (in this example we use " +"ConfigsRecord_) and save/read your entry when required." msgstr "" -"기본적으로 Flower 로그는 Federated 학습 워크로드를 실행하는 터미널에 출력됩니다. 
이는 gRPC 기반 " -"페더레이션(즉,:code:`fl.simulation.start_simulation`를 실행하는 경우)과 " -":code:`VirtualClientEngine`을 사용하는 경우(즉, " -":코드:`fl.simulation.start_simulation`을 실행하는 경우) 모두에 적용됩니다. 경우에 따라 이 로그를 " -"디스크에 저장하고 싶을 수도 있습니다. 이 경우 `fl.common.logger.configure() " -"`_" -" 함수를 호출하여 저장할 수 있습니다. 예를 들어:" -#: ../../source/how-to-configure-logging.rst:59 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:123 msgid "" -"With the above, Flower will record the log you see on your terminal to " -"``log.txt``. This file will be created in the same directory as were you " -"are running the code from. If we inspect we see the log above is also " -"recorded but prefixing with ``identifier`` each line:" +"If you run the app, you'll see an output similar to the one below. See " +"how after each round the `n_val` entry in the context gets one additional" +" integer ? Note that the order in which the `ClientApp` logs these " +"messages might differ slightly between rounds." msgstr "" -"위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니다. 이 파일은 코드를 실행한 " -"디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 " -"붙는 것을 확인할 수 있습니다:" -#: ../../source/how-to-configure-logging.rst:81 -msgid "Log your own messages" -msgstr "나만의 메시지 기록" +#: ../../source/how-to-design-stateful-clients.rst:146 +msgid "Saving model parameters to the context" +msgstr "" -#: ../../source/how-to-configure-logging.rst:83 +#: ../../source/how-to-design-stateful-clients.rst:148 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." +"Using ConfigsRecord_ or MetricsRecord_ to save \"simple\" components is " +"fine (e.g., float, integer, boolean, string, bytes, and lists of these " +"types. 
Note that MetricsRecord_ only supports float, integer, and lists " +"of these types) Flower has a specific type of record, a " +"ParametersRecord_, for storing model parameters or more generally data " +"arrays." msgstr "" -"애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 정보를 확장할 수 있습니다. 다음과 같이 쉽게 " -"추가할 수 있습니다." -#: ../../source/how-to-configure-logging.rst:114 +#: ../../source/how-to-design-stateful-clients.rst:153 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메시지가 표시됩니다." +"Let's see a couple of examples of how to save NumPy arrays first and then" +" how to save parameters of PyTorch and TensorFlow models." +msgstr "" -#: ../../source/how-to-configure-logging.rst:140 -msgid "Log to a remote service" -msgstr "원격 서비스에 로그인" +#: ../../source/how-to-design-stateful-clients.rst:158 +msgid "" +"The examples below omit the definition of a ``ClientApp`` to keep the " +"code blocks concise. To make use of ``ParametersRecord`` objects in your " +"``ClientApp`` you can follow the same principles as outlined earlier." +msgstr "" -#: ../../source/how-to-configure-logging.rst:142 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:163 +msgid "Saving NumPy arrays to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:165 +msgid "" +"Elements stored in a `ParametersRecord` are of type Array_, which is a " +"data structure that holds ``bytes`` and metadata that can be used for " +"deserialization. Let's see how to create an ``Array`` from a NumPy array " +"and insert it into a ``ParametersRecord``. Here we will make use of the " +"built-in serialization and deserialization mechanisms in Flower, namely " +"the ``flwr.common.array_from_numpy`` function and the `numpy()` method of" +" an Array_ object." 
+msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:174 +msgid "" +"Array_ objects carry bytes as their main payload and additional metadata " +"to use for deserialization. You can implement your own " +"serialization/deserialization if the provided ``array_from_numpy`` " +"doesn't fit your usecase." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:178 +msgid "" +"Let's see how to use those functions to store a NumPy array into the " +"context." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:206 +msgid "" +"To extract the data in a ``ParametersRecord``, you just need to " +"deserialize the array if interest. For example, following the example " +"above:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:223 +msgid "Saving PyTorch parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:225 +msgid "" +"Following the NumPy example above, to save parameters of a PyTorch model " +"a straightforward way of doing so is to transform the parameters into " +"their NumPy representation and then proceed as shown earlier. Below is a " +"simple self-contained example for how to do this." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:263 +msgid "" +"Let say now you want to apply the parameters stored in your context to a " +"new instance of the model (as it happens each time a ``ClientApp`` is " +"executed). You will need to:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:266 +msgid "Deserialize each element in your specific ``ParametersRecord``" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:267 +msgid "Construct a ``state_dict`` and load it" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:287 +msgid "" +"And that's it! Recall that even though this example shows how to store " +"the entire ``state_dict`` in a ``ParametersRecord``, you can just save " +"part of it. 
The process would be identical, but you might need to adjust " +"how it is loaded into an existing model using PyTorch APIs." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:293 +msgid "Saving Tensorflow/Keras parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:295 msgid "" -"The ``fl.common.logger.configure`` function, also allows specifying a " -"host to which logs can be pushed (via ``POST``) through a native Python " -"``logging.handler.HTTPHandler``. This is a particularly useful feature in" -" ``gRPC``-based Federated Learning workloads where otherwise gathering " -"logs from all entities (i.e. the server and the clients) might be " -"cumbersome. Note that in Flower simulation, the server automatically " -"displays all logs. You can still specify a ``HTTPHandler`` should you " -"wish to backup or analyze the logs somewhere else." +"Follow the same steps as done above but replace the ``state_dict`` logic " +"with simply `get_weights() " +"`_" +" to convert the model parameters to a list of NumPy arrays that can then " +"be serialized into an ``Array``. Then, after deserialization, use " +"`set_weights() " +"`_" +" to apply the new parameters to a model." msgstr "" -"또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python " -":code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정할 수 " -"있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버 및 클라이언트)에서 로그를 수집하는 것이 번거로울 수 있는 " -":code:`gRPC` 기반 Federated 학습 워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 서버가 모든 " -"로그를 자동으로 표시합니다. 로그를 다른 곳에 백업하거나 분석하려는 경우 :code:`HTTPHandler`를 지정할 수 있습니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" +#: ../../source/how-to-enable-tls-connections.rst:2 +#, fuzzy +msgid "Enable TLS connections" msgstr "SSL 연결 사용" -#: ../../source/how-to-enable-ssl-connections.rst:4 +#: ../../source/how-to-enable-tls-connections.rst:4 #, fuzzy msgid "" -"This guide describes how to a SSL-enabled secure Flower server " +"This guide describes how to a TLS-enabled secure Flower server " "(``SuperLink``) can be started and how a Flower client (``SuperNode``) " "can establish a secure connections to it." msgstr "" "이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:코드:`SuperLink`)를 시작하는 방법과 Flower " "클라이언트(:코드:`SuperNode`)가 이 서버에 보안 연결을 설정하는 방법을 설명합니다." -#: ../../source/how-to-enable-ssl-connections.rst:8 +#: ../../source/how-to-enable-tls-connections.rst:8 msgid "" "A complete code example demonstrating a secure connection can be found " "`here `_'에서 확인할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:11 +#: ../../source/how-to-enable-tls-connections.rst:11 #, fuzzy msgid "" "The code example comes with a ``README.md`` file which explains how to " -"start it. Although it is already SSL-enabled, it might be less " +"start it. Although it is already TLS-enabled, it might be less " "descriptive on how it does so. Stick to this guide for a deeper " "introduction to the topic." msgstr "" "코드 예제에는 시작 방법을 설명하는 :code:`README.md` 파일이 함께 제공됩니다. 이미 SSL을 사용하도록 설정되어 " "있지만 그 방법에 대한 설명이 부족할 수 있습니다. 이 가이드를 참고하여 이 주제에 대해 자세히 알아보세요." -#: ../../source/how-to-enable-ssl-connections.rst:16 +#: ../../source/how-to-enable-tls-connections.rst:16 msgid "Certificates" msgstr "인증서" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/how-to-enable-tls-connections.rst:18 #, fuzzy msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " +"Using TLS-enabled connections requires certificates to be passed to the " "server and client. 
For the purpose of this guide we are going to generate" " self-signed certificates. As this can become quite complex we are going " "to ask you to run the script in ``examples/advanced-" @@ -6546,7 +5933,7 @@ msgstr "" "이 과정은 상당히 복잡할 수 있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-" "tensorflow/certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/how-to-enable-tls-connections.rst:29 #, fuzzy msgid "" "This will generate the certificates in ``examples/advanced-" @@ -6555,9 +5942,10 @@ msgstr "" "이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서가 " "생성됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:32 +#: ../../source/how-to-enable-tls-connections.rst:32 +#, fuzzy msgid "" -"The approach for generating SSL certificates in the context of this " +"The approach for generating TLS certificates in the context of this " "example can serve as an inspiration and starting point, but it should not" " be used as a reference for production environments. Please refer to " "other sources regarding the issue of correctly generating certificates " @@ -6569,67 +5957,210 @@ msgstr "" "됩니다. 프로덕션 환경용 인증서를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 중요하지 않은 프로토타이핑 또는 연구 " "프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:40 +#: ../../source/how-to-enable-tls-connections.rst:40 msgid "Server (SuperLink)" msgstr "서버(SuperLink)" -#: ../../source/how-to-enable-ssl-connections.rst:42 +#: ../../source/how-to-enable-tls-connections.rst:42 +#, fuzzy msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +"Navigate to the ``examples/advanced-tensorflow`` folder (`here " +"`_) and use the following terminal command to start a server " +"(SuperLink) that uses the previously generated certificates:" msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)를 시작합니다:" -#: ../../source/how-to-enable-ssl-connections.rst:52 +#: ../../source/how-to-enable-tls-connections.rst:54 msgid "" "When providing certificates, the server expects a tuple of three " "certificates paths: CA certificate, server certificate and server private" " key." msgstr "인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증서, 서버 인증서 및 서버 개인 키입니다." -#: ../../source/how-to-enable-ssl-connections.rst:56 -msgid "Client (SuperNode)" +#: ../../source/how-to-enable-tls-connections.rst:58 +#, fuzzy +msgid "Clients (SuperNode)" msgstr "클라이언트(SuperNode)" -#: ../../source/how-to-enable-ssl-connections.rst:58 +#: ../../source/how-to-enable-tls-connections.rst:60 msgid "" "Use the following terminal command to start a client (SuperNode) that " "uses the previously generated certificates:" msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트(SuperNode)를 시작합니다:" -#: ../../source/how-to-enable-ssl-connections.rst:67 +#: ../../source/how-to-enable-tls-connections.rst:71 #, fuzzy msgid "" "When setting ``root_certificates``, the client expects a file path to " "PEM-encoded root certificates." msgstr "코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 파일 경로를 예상합니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:73 +#: ../../source/how-to-enable-tls-connections.rst:74 +#, fuzzy +msgid "" +"In another terminal, start a second SuperNode that uses the same " +"certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트(SuperNode)를 시작합니다:" + +#: ../../source/how-to-enable-tls-connections.rst:84 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Note that in the second SuperNode, if you run both on the same machine, " +"you must specify a different port for the ``ClientAppIO`` API address to " +"avoid clashing with the first SuperNode." msgstr "" -"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 시작하고, 클라이언트가 보안 연결을 설정하는 " -"방법을 배웠을 것입니다." -#: ../../source/how-to-enable-ssl-connections.rst:78 -msgid "Additional resources" -msgstr "추가 리소스" +#: ../../source/how-to-enable-tls-connections.rst:89 +msgid "Executing ``flwr run`` with TLS" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-tls-connections.rst:91 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" -msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" +"The root certificates used for executing ``flwr run`` is specified in the" +" ``pyproject.toml`` of your app." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:100 +msgid "" +"Note that the path to the ``root-certificates`` is relative to the root " +"of the project. Now, you can run the example by executing the following:" +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:110 +#, fuzzy +msgid "" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an TLS-enabled server and have two clients " +"establish secure connections to it. 
You should also have learned how to " +"run your Flower project using ``flwr run`` with TLS enabled." +msgstr "" +"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 시작하고, 클라이언트가 보안 연결을 설정하는 " +"방법을 배웠을 것입니다." + +#: ../../source/how-to-enable-tls-connections.rst:117 +msgid "" +"For running a Docker setup with TLS enabled, please refer to :doc:`docker" +"/enable-tls`." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:121 +msgid "Additional resources" +msgstr "추가 리소스" + +#: ../../source/how-to-enable-tls-connections.rst:123 +msgid "" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" -#: ../../source/how-to-enable-ssl-connections.rst:83 +#: ../../source/how-to-enable-tls-connections.rst:126 msgid "`Let's Encrypt `_" msgstr "'암호화하세요 `_'" -#: ../../source/how-to-enable-ssl-connections.rst:84 +#: ../../source/how-to-enable-tls-connections.rst:127 msgid "`certbot `_" msgstr "`인증봇 `_" +#: ../../source/how-to-implement-fedbn.rst:2 +#, fuzzy +msgid "Implement FedBN" +msgstr "전략 구현" + +#: ../../source/how-to-implement-fedbn.rst:4 +#, fuzzy +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training method " +"designed for non-IID data. We are using PyTorch to train a Convolutional " +"Neural Network (with Batch Normalization layers) on the CIFAR-10 dataset." +" When applying FedBN, only minor changes are needed compared to " +":doc:`Quickstart PyTorch `." +msgstr "" +"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " +"`_으로 기존 머신러닝 워크로드의 federated 버전을 구축하기 " +"위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 사용하여 CIFAR-10 데이터 세트에서 컨볼루션 " +"신경망(일괄 정규화 레이어 포함)을 훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " +"연합식으로 ` 와 비교했을 때 몇 가지 사항만 " +"변경 하면 됩니다." 
+ +#: ../../source/how-to-implement-fedbn.rst:12 +#, fuzzy +msgid "Model" +msgstr "mod" + +#: ../../source/how-to-implement-fedbn.rst:14 +msgid "" +"A full introduction to federated learning with PyTorch and Flower can be " +"found in :doc:`Quickstart PyTorch `. This " +"how-to guide varies only a few details in ``task.py``. FedBN requires a " +"model architecture (defined in class ``Net()``) that uses Batch " +"Normalization layers:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:45 +msgid "" +"Try editing the model architecture, then run the project to ensure " +"everything still works:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:52 +msgid "" +"So far this should all look fairly familiar if you've used Flower with " +"PyTorch before." +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:55 +msgid "FedBN" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:57 +msgid "" +"To adopt FedBN, only the ``get_parameters`` and ``set_parameters`` " +"functions in ``task.py`` need to be revised. FedBN only changes the " +"client-side by excluding batch normalization parameters from being " +"exchanged with the server." +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:61 +#, fuzzy +msgid "" +"We revise the *client* logic by changing ``get_parameters`` and " +"``set_parameters`` in ``task.py``. The batch normalization parameters are" +" excluded from model parameter list when sending to or receiving from the" +" server:" +msgstr "" +"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 " +":code:`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 서버에서 받을 때 모델" +" 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습니다." + +#: ../../source/how-to-implement-fedbn.rst:90 +msgid "To test the new approach, run the project again:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:96 +msgid "" +"Your PyTorch project now runs federated learning with FedBN. " +"Congratulations!"
+msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:99 +msgid "Next Steps" +msgstr "다음 단계" + +#: ../../source/how-to-implement-fedbn.rst:101 +#, fuzzy +msgid "" +"The example is of course over-simplified since all clients load the exact" +" same dataset. This isn't realistic. You now have the tools to explore " +"this topic further. How about using different subsets of CIFAR-10 on each" +" client? How about adding more clients?" +msgstr "" +"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물론 이 예제는 두 " +"클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 " +"주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요?" +" 클라이언트를 더 추가하는 것은 어떨까요?" + #: ../../source/how-to-implement-strategies.rst:2 msgid "Implement strategies" msgstr "전략 구현" @@ -6994,7 +6525,6 @@ msgid "Install stable release" msgstr "안정적인 릴리즈 설치" #: ../../source/how-to-install-flower.rst:14 -#: ../../source/how-to-upgrade-to-flower-next.rst:66 msgid "Using pip" msgstr "pip 사용" @@ -7107,470 +6637,347 @@ msgstr "" "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 ``simulation`` extr와 함께 " "설치해야 합니다::" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "모니터 시뮬레이션" +#: ../../source/how-to-run-simulations.rst:22 +msgid "Run simulations" +msgstr "시뮬레이션 실행" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/how-to-run-simulations.rst:24 +#, fuzzy msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." 
+"Simulating Federated Learning workloads is useful for a multitude of use " +"cases: you might want to run your workload on a large cohort of clients " +"without having to source, configure, and manage a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without going through " +"a complex setup process; you might want to validate your algorithm in " +"different scenarios at varying levels of data and system heterogeneity, " +"client availability, privacy budgets, etc. These are among some of the " +"use cases where simulating FL workloads makes sense." msgstr "" -"Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 리소스를 모니터링할 수 있습니다. 또한 Flower 시뮬레이션 엔진은 " -"강력하며 클라이언트별 리소스 할당 방법을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통해 더 현명한 " -"결정을 내리고 실행 시간을 단축할 수 있습니다." +"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 대규모 클라이언트 집단에서 워크로드를 실행하되 많은 " +"수의 물리적 장치를 소싱, 구성 및 관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨팅 시스템에서 " +"최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 " +"다양한 시나리오에서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사례는 FL 워크로드 시뮬레이션이 " +"적합한 사용 사례 중 일부입니다. Flower는 `VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 " +"있습니다." -#: ../../source/how-to-monitor-simulation.rst:9 +#: ../../source/how-to-run-simulations.rst:33 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." +"Flower's ``Simulation Engine`` schedules, launches, and manages " +"|clientapp_link|_ instances. It does so through a ``Backend``, which " +"contains several workers (i.e., Python processes) that can execute a " +"``ClientApp`` by passing it a |context_link|_ and a |message_link|_. " +"These ``ClientApp`` objects are identical to those used by Flower's " +"`Deployment Engine `_, making " +"alternating between *simulation* and *deployment* an effortless process. 
" +"The execution of ``ClientApp`` objects through Flower's ``Simulation " +"Engine`` is:" msgstr "" -"구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관리자가 설치되어 있다고 " -"가정합니다." -#: ../../source/how-to-monitor-simulation.rst:13 -msgid "Downloads" -msgstr "다운로드" - -#: ../../source/how-to-monitor-simulation.rst:19 +#: ../../source/how-to-run-simulations.rst:41 +#, fuzzy msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"**Resource-aware**: Each backend worker executing ``ClientApp``\\s gets " +"assigned a portion of the compute and memory on your system. You can " +"define these at the beginning of the simulation, allowing you to control " +"the degree of parallelism of your simulation. For a fixed total pool of " +"resources, the fewer the resources per backend worker, the more " +"``ClientApps`` can run concurrently on the same hardware." msgstr "" -"`Prometheus `_는 데이터 수집에 사용되며, `Grafana " -"`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 도구는 모두 Flower가 " -"내부적으로 사용하는 `Ray `_와 잘 통합되어 있습니다." - -#: ../../source/how-to-monitor-simulation.rst:23 -msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." - -#: ../../source/how-to-monitor-simulation.rst:26 -msgid "If you are on an M1 Mac, it should be:" -msgstr "M1 Mac을 사용 중이라면:" - -#: ../../source/how-to-monitor-simulation.rst:33 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "이전 세대 Intel Mac 장치에서는:" - -#: ../../source/how-to-monitor-simulation.rst:40 -msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" -msgstr "각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니다:" +"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할당받는다는 것을 의미합니다. 
사용자는 " +"시뮬레이션을 시작할 때 이를 제어할 수 있으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " +"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:51 +#: ../../source/how-to-run-simulations.rst:46 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"**Batchable**: When there are more ``ClientApps`` to execute than backend" +" workers, ``ClientApps`` are queued and executed as soon as resources are" +" freed. This means that ``ClientApps`` are typically executed in batches " +"of N, where N is the number of backend workers." msgstr "" -"를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정을 붙여넣습니다. 요구 사항에 따라 시간 " -"간격을 조정할 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:67 +#: ../../source/how-to-run-simulations.rst:50 +#, fuzzy msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"**Self-managed**: This means that you, as a user, do not need to launch " +"``ClientApps`` manually; instead, the ``Simulation Engine``'s internals " +"orchestrates the execution of all ``ClientApp``\\s." msgstr "" -"이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 수행합니다. 이전과 마찬가지로 다음 " -"명령 중 하나를 사용하여 파일을 엽니다:" - -#: ../../source/how-to-monitor-simulation.rst:78 -msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." -msgstr "터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." - -#: ../../source/how-to-monitor-simulation.rst:94 -msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이제 시작해 보겠습니다." 
- -#: ../../source/how-to-monitor-simulation.rst:98 -msgid "Tracking metrics" -msgstr "매트릭 트래킹" - -#: ../../source/how-to-monitor-simulation.rst:100 -msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." -msgstr "Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해야 합니다." - -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." - -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "Now, you are ready to start your workload." -msgstr "이제 워크로드를 시작할 준비가 되었습니다." - -#: ../../source/how-to-monitor-simulation.rst:121 -msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" -msgstr "시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" +"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대신 " +":code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: ../../source/how-to-monitor-simulation.rst:127 +#: ../../source/how-to-run-simulations.rst:53 #, fuzzy -msgid "You can look at everything at http://127.0.0.1:8265 ." -msgstr "``_ 에서 모든 것을 볼 수 있습니다." - -#: ../../source/how-to-monitor-simulation.rst:129 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니다." +"**Ephemeral**: This means that a ``ClientApp`` is only materialized when " +"it is required by the application (e.g., to do `fit() `_). The object is destroyed afterward, " +"releasing the resources it was assigned and allowing other clients to " +"participate." +msgstr "" +"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니다(예: `fit() `_을 수행하기 위해). 객체는 나중에 소멸되어 할당된 리소스를 해제하고" +" 다른 클라이언트가 참여할 수 있도록 허용합니다." 
-#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-run-simulations.rst:60 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"You can preserve the state (e.g., internal variables, parts of an ML " +"model, intermediate results) of a ``ClientApp`` by saving it to its " +"``Context``. Check the `Designing Stateful Clients `_ guide for a complete walkthrough." msgstr "" -"또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 클릭하여 Grafana에서 바로 확인할 수도 있습니다. Ray 대시보드는 " -"시뮬레이션 중에만 액세스할 수 있다는 점에 유의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " -"있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:137 +#: ../../source/how-to-run-simulations.rst:65 #, fuzzy msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port ``3000`` on your" -" machine as long as they are running." +"The ``Simulation Engine`` delegates to a ``Backend`` the role of spawning" +" and managing ``ClientApps``. The default backend is the ``RayBackend``, " +"which uses `Ray `_, an open-source framework for " +"scalable Python workloads. In particular, each worker is an `Actor " +"`_ capable of " +"spawning a ``ClientApp`` given its ``Context`` and a ``Message`` to " +"process." msgstr "" -"시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 중인 동안 컴퓨터에서 포트 " -":code:`3000` 등을 차단하므로 이 작업이 중요합니다." +":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프레임워크인 `Ray " +"`_를 사용하여 `virtual` 클라이언트를 구현합니다. 특히 Flower의 " +":code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생성하고 해당 워크로드를 실행합니다." 
-#: ../../source/how-to-monitor-simulation.rst:147 -msgid "Resource allocation" -msgstr "리소스 할당" +#: ../../source/how-to-run-simulations.rst:73 +msgid "Launch your Flower simulation" +msgstr "Flower 시뮬레이션 시작" -#: ../../source/how-to-monitor-simulation.rst:149 +#: ../../source/how-to-run-simulations.rst:75 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리소스를 효율적으로 할당할 수 있습니다." +"Running a simulation is straightforward; in fact, it is the default mode " +"of operation for |flwr_run_link|_. Therefore, running Flower simulations " +"primarily requires you to first define a ``ClientApp`` and a " +"``ServerApp``. A convenient way to generate a minimal but fully " +"functional Flower app is by means of the |flwr_new_link|_ command. There " +"are multiple templates to choose from. The example below uses the " +"``PyTorch`` template." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:152 +#: ../../source/how-to-run-simulations.rst:83 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"If you haven't already, install Flower via ``pip install -U flwr`` in a " +"Python environment." msgstr "" -"처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서 사용 가능한 모든 리소스를 사용하여 시작되며, 이 리소스는 " -"클라이언트 간에 공유됩니다. 그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동시에 모델 학습이 이루어지는" -" 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그의 뒷부분에서 설명합니다. 
다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" - -#: ../../source/how-to-monitor-simulation.rst:164 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-run-simulations.rst:91 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" -msgstr "그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다(모두 덮어쓸 필요는 없음):" - -#: ../../source/how-to-monitor-simulation.rst:195 -msgid "Let’s also specify the resource for a single client." -msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." +"Then, follow the instructions shown after completing the |flwr_new_link|_" +" command. When you execute |flwr_run_link|_, you'll be using the " +"``Simulation Engine``." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:225 +#: ../../source/how-to-run-simulations.rst:94 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"If we take a look at the ``pyproject.toml`` that was generated from the " +"|flwr_new_link|_ command (and loaded upon |flwr_run_link|_ execution), we" +" see that a *default* federation is defined. It sets the number of " +"supernodes to 10." msgstr "" -"이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리소스가 있을 때(병렬로 실행되는 등) 새 " -"클라이언트를 시작합니다." -#: ../../source/how-to-monitor-simulation.rst:228 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:106 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " -"two clients and therefore enable them to run concurrently. Be careful not" -" to require more resources than available. 
If you specified " -"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" -" GPUs but decided to set 1 in ``ray_init_args``)." +"You can modify the size of your simulations by adjusting ``options.num-" +"supernodes``." msgstr "" -"위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않습니다. :code:`client_num_gpus = " -"0.5` 를 설정하면 두 개의 클라이언트를 실행할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소스를" -" 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬레이션이 시작되지 않습니다(GPU가 " -"2개이지만 :code:`ray_init_args`에서 1개를 설정한 경우에도 마찬가지입니다)." - -#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "자주 묻는 질문" -#: ../../source/how-to-monitor-simulation.rst:237 -msgid "Q: I don't see any metrics logged." -msgstr "질문: 기록된 메트릭이 보이지 않습니다." +#: ../../source/how-to-run-simulations.rst:109 +msgid "Simulation examples" +msgstr "시뮬레이션 예제" -#: ../../source/how-to-monitor-simulation.rst:239 +#: ../../source/how-to-run-simulations.rst:111 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +"In addition to the quickstart tutorials in the documentation (e.g., " +"`quickstart PyTorch Tutorial `_, " +"`quickstart JAX Tutorial `_), most examples" +" in the Flower repository are simulation-ready." msgstr "" -"A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다(기본값은 '지난 30분'). 시뮬레이션이 실행된 " -"기간을 반영하도록 기간을 변경해 주세요." -#: ../../source/how-to-monitor-simulation.rst:243 +#: ../../source/how-to-run-simulations.rst:116 +#, fuzzy msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"`Quickstart TensorFlow/Keras " +"`_." msgstr "" -"질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동한 후 Grafana 서버가 실행 중인지 " -"확인하고 이 페이지를 새로고침하세요.\"라는 메시지가 표시됩니다." 
+"이것이 바로`Tensorflow/Keras Simulation " +"`_ 예제에서 사용된 메커니즘입니다." -#: ../../source/how-to-monitor-simulation.rst:246 +#: ../../source/how-to-run-simulations.rst:118 +#, fuzzy msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" -msgstr "A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" +"`Quickstart PyTorch `_" +msgstr "" +"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈련합니다." -#: ../../source/how-to-monitor-simulation.rst:252 +#: ../../source/how-to-run-simulations.rst:120 #, fuzzy msgid "" -"Q: I see \"This site can't be reached\" when going to " -"http://127.0.0.1:8265." -msgstr "Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다.\"라는 메시지가 표시됩니다." +"`Advanced PyTorch `_" +msgstr "" +"보안 연결을 보여주는 전체 코드 예제는 '여기 " +"`_'에서 확인할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:254 +#: ../../source/how-to-run-simulations.rst:122 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." +"`Quickstart MLX `_" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:257 -msgid "Resources" -msgstr "리소스" +#: ../../source/how-to-run-simulations.rst:123 +msgid "" +"`ViT fine-tuning `_" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:259 +#: ../../source/how-to-run-simulations.rst:125 #, fuzzy msgid "" -"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" -"started.html" +"The complete list of examples can be found in `the Flower GitHub " +"`_." msgstr "" -"Ray 대시보드: ``_" +"보안 연결을 보여주는 전체 코드 예제는 '여기 " +"`_'에서 확인할 수 있습니다." 
-#: ../../source/how-to-monitor-simulation.rst:261 +#: ../../source/how-to-run-simulations.rst:131 #, fuzzy -msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" -msgstr "Ray 메트릭: ``_" - -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "시뮬레이션 실행" +msgid "Defining ``ClientApp`` resources" +msgstr "클라이언트 리소스 할당" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/how-to-run-simulations.rst:133 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"By default, the ``Simulation Engine`` assigns two CPU cores to each " +"backend worker. This means that if your system has 10 CPU cores, five " +"backend workers can be running in parallel, each executing a different " +"``ClientApp`` instance." msgstr "" -"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 대규모 클라이언트 집단에서 워크로드를 실행하되 많은 " -"수의 물리적 장치를 소싱, 구성 및 관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨팅 시스템에서 " -"최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 " -"다양한 시나리오에서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사례는 FL 워크로드 시뮬레이션이 " -"적합한 사용 사례 중 일부입니다. Flower는 `VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 " -"있습니다." 
-#: ../../source/how-to-run-simulations.rst:19 +#: ../../source/how-to-run-simulations.rst:137 #, fuzzy msgid "" -"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " -"clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -"``VirtualClientEngine`` are:" +"More often than not, you would probably like to adjust the resources your" +" ``ClientApp`` gets assigned based on the complexity (i.e., compute and " +"memory footprint) of your workload. You can do so by adjusting the " +"backend resources for your federation." msgstr "" -":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니다. 이러한 클라이언트는 " -"`non-virtual` 클라이언트(예: `flwr.client.start_client `_ 명령을 통해 실행하는 클라이언트)와 동일하며, `flwr.client.NumPyClient `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 " -"방식으로 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언트는 다음과 같습니다:" +"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이언트에 할당되는 리소스를 조정하고 싶을 것입니다." +" 시뮬레이션을 시작할 때 `client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. Ray는 " +"내부적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하고 스폰합니다:" -#: ../../source/how-to-run-simulations.rst:26 +#: ../../source/how-to-run-simulations.rst:143 +#, python-format msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." 
+"Note that the resources the backend assigns to each worker (and hence to " +"each ``ClientApp`` being executed) are assigned in a *soft* manner. This " +"means that the resources are primarily taken into account in order to " +"control the degree of parallelism at which ``ClientApp`` instances should" +" be executed. Resource assignment is **not strict**, meaning that if you " +"specified your ``ClientApp`` is assumed to make use of 25% of the " +"available VRAM but it ends up using 50%, it might cause other " +"``ClientApp`` instances to crash throwing an out-of-memory (OOM) error." msgstr "" -"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할당받는다는 것을 의미합니다. 사용자는 " -"시뮬레이션을 시작할 때 이를 제어할 수 있으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " -"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:31 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:151 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to ``VirtualClientEngine``'s " -"internals." +"Customizing resources can be done directly in the ``pyproject.toml`` of " +"your app." msgstr "" -"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대신 " -":code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: ../../source/how-to-run-simulations.rst:33 +#: ../../source/how-to-run-simulations.rst:160 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"With the above backend settings, your simulation will run as many " +"``ClientApps`` in parallel as CPUs you have in your system. GPU resources" +" for your ``ClientApp`` can be assigned by specifying the **ratio** of " +"VRAM each should make use of." 
msgstr "" -"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니다(예: `fit() `_을 수행하기 위해). 객체는 나중에 소멸되어 할당된 리소스를 해제하고" -" 다른 클라이언트가 참여할 수 있도록 허용합니다." -#: ../../source/how-to-run-simulations.rst:38 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:173 msgid "" -"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " -"`Actors `_ to spawn " -"`virtual` clients and run their workload." +"If you are using TensorFlow, you need to `enable memory growth " +"`_ so " +"multiple ``ClientApp`` instances can share a GPU. This needs to be done " +"before launching the simulation. To do so, set the environment variable " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"``." msgstr "" -":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프레임워크인 `Ray " -"`_를 사용하여 `virtual` 클라이언트를 구현합니다. 특히 Flower의 " -":code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생성하고 해당 워크로드를 실행합니다." - -#: ../../source/how-to-run-simulations.rst:45 -msgid "Launch your Flower simulation" -msgstr "Flower 시뮬레이션 시작" -#: ../../source/how-to-run-simulations.rst:47 +#: ../../source/how-to-run-simulations.rst:179 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"Let's see how the above configuration results in a different number of " +"``ClientApps`` running in parallel depending on the resources available " +"in your system. If your system has:" msgstr "" -"Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, 전략 및 유틸리티 함수를 정의하여 데이터 세트를 다운로드하고 로드(및" -" 파티션)해야 합니다. 
이 작업을 마친 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면 되며, 최소한의 예시는 다음과 " -"같습니다:" - -#: ../../source/how-to-run-simulations.rst:73 -msgid "VirtualClientEngine resources" -msgstr "VirtualClientEngine 리소스" -#: ../../source/how-to-run-simulations.rst:75 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:183 +#, python-format msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -"``ray_init_args`` input argument to ``start_simulation`` which the VCE " -"internally passes to Ray's ``ray.init`` command. For a complete list of " -"settings you can configure check the `ray.init " -"`_" -" documentation. Do not set ``ray_init_args`` if you want the VCE to use " -"all your system's CPUs and GPUs." +"10x CPUs and 1x GPU: at most 4 ``ClientApps`` will run in parallel since " +"each requires 25% of the available VRAM." msgstr "" -"기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 있으며, 이는 Ray를 시작할 때의 기본" -" 동작이기도 합니다. 그러나 일부 설정에서는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 이 설정은 " -"VCE가 내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 :code:`start_simulation`에 대한 " -":code:`ray_init_args` 입력 인수를 통해 수행할 수 있습니다. 구성할 수 있는 전체 설정 목록은 `ray.init " -"`_" -" 설명서를 확인하세요. VCE가 시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지" -" 마세요." -#: ../../source/how-to-run-simulations.rst:97 -msgid "Assigning client resources" -msgstr "클라이언트 리소스 할당" +#: ../../source/how-to-run-simulations.rst:185 +msgid "" +"10x CPUs and 2x GPUs: at most 8 ``ClientApps`` will run in parallel " +"(VRAM-limited)." +msgstr "" -#: ../../source/how-to-run-simulations.rst:99 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:186 msgid "" -"By default the ``VirtualClientEngine`` assigns a single CPU core (and " -"nothing else) to each virtual client. 
This means that if your system has " -"10 cores, that many virtual clients can be concurrently running." +"6x CPUs and 4x GPUs: at most 6 ``ClientApps`` will run in parallel (CPU-" +"limited)." msgstr "" -"기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어를 할당합니다(그 외에는 " -"아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:103 +#: ../../source/how-to-run-simulations.rst:187 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"10x CPUs but 0x GPUs: you won't be able to run the simulation since not " +"even the resources for a single ``ClientApp`` can be met." msgstr "" -"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이언트에 할당되는 리소스를 조정하고 싶을 것입니다." -" 시뮬레이션을 시작할 때 `client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. Ray는 " -"내부적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하고 스폰합니다:" -#: ../../source/how-to-run-simulations.rst:110 -#, fuzzy -msgid "``num_cpus`` indicates the number of CPU cores a client would get." -msgstr ":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." +#: ../../source/how-to-run-simulations.rst:190 +msgid "" +"A generalization of this is given by the following equation. It gives the" +" maximum number of ``ClientApps`` that can be executed in parallel on " +"available CPU cores (SYS_CPUS) and VRAM (SYS_GPUS)." +msgstr "" -#: ../../source/how-to-run-simulations.rst:111 -#, fuzzy -msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." -msgstr ":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." 
+#: ../../source/how-to-run-simulations.rst:194 +msgid "" +"N = \\min\\left(\\left\\lfloor \\frac{\\text{SYS_CPUS}}{\\text{num_cpus}}" +" \\right\\rfloor, \\left\\lfloor " +"\\frac{\\text{SYS_GPUS}}{\\text{num_gpus}} \\right\\rfloor\\right)" +msgstr "" -#: ../../source/how-to-run-simulations.rst:113 -msgid "Let's see a few examples:" -msgstr "몇 가지 예를 살펴보겠습니다:" +#: ../../source/how-to-run-simulations.rst:198 +msgid "" +"Both ``num_cpus`` (an integer higher than 1) and ``num_gpus`` (a non-" +"negative real number) should be set on a per ``ClientApp`` basis. If, for" +" example, you want only a single ``ClientApp`` to run on each GPU, then " +"set ``num_gpus=1.0``. If, for example, a ``ClientApp`` requires access to" +" two whole GPUs, you'd set ``num_gpus=2``." +msgstr "" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:203 #, fuzzy msgid "" -"While the ``client_resources`` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " +"While the ``options.backend.client-resources`` can be used to control the" +" degree of concurrency in your simulations, this does not stop you from " +"running hundreds or even thousands of clients in the same round and " +"having orders of magnitude more *dormant* (i.e., not participating in a " "round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " -"client sampled by the strategy) and then will execute them in a resource-" -"aware manner in batches of 8." +" system can only accommodate 8 clients concurrently. 
The ``Simulation " +"Engine`` will schedule 100 ``ClientApps`` to run and then will execute " +"them in a resource-aware manner in batches of 8." msgstr "" "code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있지만, 동일한 라운드에서 수십, " "수백 또는 수천 개의 클라이언트를 실행하고 훨씬 더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 " @@ -7578,155 +6985,152 @@ msgstr "" "code:`VirtualClientEngine`은 실행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 " "다음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." -#: ../../source/how-to-run-simulations.rst:140 +#: ../../source/how-to-run-simulations.rst:212 +#, fuzzy +msgid "Simulation Engine resources" +msgstr "VirtualClientEngine 리소스" + +#: ../../source/how-to-run-simulations.rst:214 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +"By default, the ``Simulation Engine`` has **access to all system " +"resources** (i.e., all CPUs, all GPUs). However, in some settings, you " +"might want to limit how many of your system resources are used for " +"simulation. You can do this in the ``pyproject.toml`` of your app by " +"setting the ``options.backend.init_args`` variable." msgstr "" -"리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 사용자 지정 리소스를 정의하는 방법에 대한 모든 복잡한 세부 사항을 " -"이해하려면 'Ray 문서 '를 참조하세요." -#: ../../source/how-to-run-simulations.rst:145 -msgid "Simulation examples" -msgstr "시뮬레이션 예제" +#: ../../source/how-to-run-simulations.rst:228 +msgid "" +"With the above setup, the Backend will be initialized with a single CPU " +"and GPU. Therefore, even if more CPUs and GPUs are available in your " +"system, they will not be used for the simulation. The example above " +"results in a single ``ClientApp`` running at any given point." 
+msgstr "" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:233 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"For a complete list of settings you can configure, check the `ray.init " +"`_" +" documentation." msgstr "" -"Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이션 예제는 `Flower 레포지토리 " -"`_에서 제공됩니다. Google Colab에서도 실행할 수 있습니다:" -#: ../../source/how-to-run-simulations.rst:151 +#: ../../source/how-to-run-simulations.rst:236 +msgid "For the highest performance, do not set ``options.backend.init_args``." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:239 +#, fuzzy +msgid "Simulation in Colab/Jupyter" +msgstr "CLI 시뮬레이션" + +#: ../../source/how-to-run-simulations.rst:241 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +"The preferred way of running simulations should always be " +"|flwr_run_link|_. However, the core functionality of the ``Simulation " +"Engine`` can be used from within a Google Colab or Jupyter environment by" +" means of `run_simulation `_." msgstr "" -"`Tensorflow/Keras 시뮬레이션 " -"`_: 100개의 클라이언트가 공동으로 MNIST에서 MLP 모델을 훈련합니다." -#: ../../source/how-to-run-simulations.rst:154 +#: ../../source/how-to-run-simulations.rst:262 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"With ``run_simulation``, you can also control the amount of resources for" +" your ``ClientApp`` instances. Do so by setting ``backend_config``. If " +"unset, the default resources are assigned (i.e., 2xCPUs per ``ClientApp``" +" and no GPU)." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:273 +msgid "" +"Refer to the `30 minutes Federated AI Tutorial " +"`_ for a complete example on how to " +"run Flower Simulations in Colab." 
msgstr "" -"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈련합니다." -#: ../../source/how-to-run-simulations.rst:159 +#: ../../source/how-to-run-simulations.rst:280 msgid "Multi-node Flower simulations" msgstr "멀티 노드 Flower 시뮬레이션" -#: ../../source/how-to-run-simulations.rst:161 +#: ../../source/how-to-run-simulations.rst:282 #, fuzzy msgid "" -"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " -"multiple compute nodes. Before starting your multi-node simulation ensure" -" that you:" +"Flower's ``Simulation Engine`` allows you to run FL simulations across " +"multiple compute nodes so that you're not restricted to running " +"simulations on a _single_ machine. Before starting your multi-node " +"simulation, ensure that you:" msgstr "" "Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬레이션을 실행할 수 " "있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항을 확인하세요:" -#: ../../source/how-to-run-simulations.rst:164 -msgid "Have the same Python environment in all nodes." +#: ../../source/how-to-run-simulations.rst:286 +#, fuzzy +msgid "Have the same Python environment on all nodes." msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." -#: ../../source/how-to-run-simulations.rst:165 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#: ../../source/how-to-run-simulations.rst:287 +#, fuzzy +msgid "Have a copy of your code on all nodes." msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." -#: ../../source/how-to-run-simulations.rst:166 +#: ../../source/how-to-run-simulations.rst:288 msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +"Have a copy of your dataset on all nodes. If you are using partitions " +"from `Flower Datasets `_, ensure the " +"partitioning strategy its parameterization are the same. The expectation " +"is that the i-th dataset partition is identical in all nodes." 
msgstr "" -"모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation considerations " -"`에서 확인하세요)" -#: ../../source/how-to-run-simulations.rst:168 +#: ../../source/how-to-run-simulations.rst:292 #, fuzzy msgid "" -"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " -"``VirtualClientEngine`` attaches to a running Ray instance." -msgstr "" -":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 " -":code:`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." - -#: ../../source/how-to-run-simulations.rst:171 -#, fuzzy -msgid "" -"Start Ray on you head node: on the terminal type ``ray start --head``. " +"Start Ray on your head node: on the terminal, type ``ray start --head``. " "This command will print a few lines, one of which indicates how to attach" " other nodes to the head node." msgstr "" "헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 명령은 몇 줄을 출력하며, 그" " 중 하나는 다른 노드를 헤드 노드에 연결하는 방법을 나타냅니다." -#: ../../source/how-to-run-simulations.rst:174 +#: ../../source/how-to-run-simulations.rst:295 #, fuzzy msgid "" "Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -"``ray start --address='192.168.1.132:6379'``" +"starting the head and execute it on the terminal of a new node (before " +"executing |flwr_run_link|_). For example: ``ray start " +"--address='192.168.1.132:6379'``. Note that to be able to attach nodes to" +" the head node they should be discoverable by each other." msgstr "" "헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드의 터미널에서 실행합니다: 예: :code:`ray" " start --address='192.168.1.132:6379'`" -#: ../../source/how-to-run-simulations.rst:178 +#: ../../source/how-to-run-simulations.rst:300 +#, fuzzy msgid "" "With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"would if the simulation were running on a single node. 
In other words:" msgstr "위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 헤드 노드에서 코드를 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:181 +#: ../../source/how-to-run-simulations.rst:308 #, fuzzy msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command ``ray stop`` in each node's terminal " -"(including the head node)." +"Once your simulation is finished, if you'd like to dismantle your " +"cluster, you simply need to run the command ``ray stop`` in each node's " +"terminal (including the head node)." msgstr "" "시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널에서 :code:`ray stop` 명령을 실행하기만 " "하면 됩니다." -#: ../../source/how-to-run-simulations.rst:185 -msgid "Multi-node simulation good-to-know" -msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" - -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" -msgstr "여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합니다:" - -#: ../../source/how-to-run-simulations.rst:189 -#, fuzzy -msgid "" -"User ``ray status`` to check all nodes connected to your head node as " -"well as the total resources available to the ``VirtualClientEngine``." -msgstr "" -"사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 " -":code:`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." - -#: ../../source/how-to-run-simulations.rst:192 +#: ../../source/how-to-run-simulations.rst:313 #, fuzzy msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -"``VirtualClientEngine`` can schedule as many `virtual` clients as that " -"node can possible run. In some settings you might want to exclude certain" -" resources from the simulation. 
You can do this by appending `--num-" -"cpus=` and/or `--num-gpus=` in " -"any ``ray start`` command (including when starting the head)" +"When attaching a new node to the head, all its resources (i.e., all CPUs," +" all GPUs) will be visible by the head node. This means that the " +"``Simulation Engine`` can schedule as many ``ClientApp`` instances as " +"that node can possibly run. In some settings, you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"``--num-cpus=`` and/or ``--num-" +"gpus=`` in any ``ray start`` command (including when " +"starting the head)." msgstr "" "새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 헤드 노드에 표시됩니다. 즉, " ":code:`VirtualClientEngine`은 해당 노드가 실행할 수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. " @@ -7734,179 +7138,176 @@ msgstr "" "포함)에 `--num-cpus=` 및/또는 `--num-" "gpus=`를 추가하여 이 작업을 수행하면 됩니다" -#: ../../source/how-to-run-simulations.rst:202 -msgid "Considerations for simulations" -msgstr "시뮬레이션 시 고려 사항" +#: ../../source/how-to-run-simulations.rst:322 +#, fuzzy +msgid "FAQ for Simulations" +msgstr "시뮬레이션 실행" + +#: ../../source/how-to-run-simulations.rst +msgid "Can I make my ``ClientApp`` instances stateful?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:206 +#: ../../source/how-to-run-simulations.rst:326 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." -msgstr "Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측면에서 적극적으로 노력하고 있습니다." +"Yes. Use the ``state`` attribute of the |context_link|_ object that is " +"passed to the ``ClientApp`` to save variables, parameters, or results to " +"it. Read the `Designing Stateful Clients `_ guide for a complete walkthrough." +msgstr "" + +#: ../../source/how-to-run-simulations.rst +msgid "Can I run multiple simulations on the same machine?" 
+msgstr "" -#: ../../source/how-to-run-simulations.rst:209 +#: ../../source/how-to-run-simulations.rst:330 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"Yes, but bear in mind that each simulation isn't aware of the resource " +"usage of the other. If your simulations make use of GPUs, consider " +"setting the ``CUDA_VISIBLE_DEVICES`` environment variable to make each " +"simulation use a different set of the available GPUs. Export such an " +"environment variable before starting |flwr_run_link|_." msgstr "" -"현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를 프로토타이핑하든, 여러 고성능 GPU 노드에서 복잡한 FL 파이프라인을 " -"훈련하든 상관없이 시뮬레이션 모드에서 Federated 학습 워크로드를 실행할 수 있습니다. VCE에 더 많은 기능을 추가하는 " -"동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 가지 사항을 강조합니다. 또한 현재 구현에서 몇 " -"가지 제한 사항을 강조합니다." -#: ../../source/how-to-run-simulations.rst:217 -msgid "GPU resources" -msgstr "GPU 리소스" +#: ../../source/how-to-run-simulations.rst +msgid "" +"Do the CPU/GPU resources set for each ``ClientApp`` restrict how much " +"compute/memory these make use of?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:219 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:334 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -"``num_gpus`` in ``client_resources``. This being said, Ray (used " -"internally by the VCE) is by default:" +"No. These resources are exclusively used by the simulation backend to " +"control how many workers can be created on startup. 
Let's say N backend " +"workers are launched, then at most N ``ClientApp`` instances will be " +"running in parallel. It is your responsibility to ensure ``ClientApp`` " +"instances have enough resources to execute their workload (e.g., fine-" +"tune a transformer model)." msgstr "" -"VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트에 GPU 메모리 " -"공유를 할당합니다. 즉, (VCE에서 내부적으로 사용하는) Ray가 기본적으로 사용됩니다:" -#: ../../source/how-to-run-simulations.rst:222 -#, fuzzy +#: ../../source/how-to-run-simulations.rst +msgid "My ``ClientApp`` is triggering OOM on my GPU. What should I do?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:338 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set ``num_gpus=0.5`` and you have two GPUs in your system with different" -" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " -"concurrently." +"It is likely that your `num_gpus` setting, which controls the number of " +"``ClientApp`` instances that can share a GPU, is too low (meaning too " +"many ``ClientApps`` share the same GPU). Try the following:" msgstr "" -"GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: 32GB와 8GB) VRAM 용량을 가진 두" -" 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." -#: ../../source/how-to-run-simulations.rst:225 +#: ../../source/how-to-run-simulations.rst:340 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"Set your ``num_gpus=1``. This will make a single ``ClientApp`` run on a " +"GPU." msgstr "" -"관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는지 알지 못합니다. 여기서 두 가지 시사점을 " -"얻을 수 있습니다:" -#: ../../source/how-to-run-simulations.rst:228 +#: ../../source/how-to-run-simulations.rst:341 +msgid "Inspect how much VRAM is being used (use ``nvidia-smi`` for this)." 
+msgstr "" + +#: ../../source/how-to-run-simulations.rst:342 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"Based on the VRAM you see your single ``ClientApp`` using, calculate how " +"many more would fit within the remaining VRAM. One divided by the total " +"number of ``ClientApps`` is the ``num_gpus`` value you should set." msgstr "" -"집계 후 '글로벌 모델'을 평가하려면 Flower 서버에 GPU가 필요할 수 있습니다(예: `evaluate method `_를 사용할 때)" -#: ../../source/how-to-run-simulations.rst:231 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:344 +msgid "Refer to :ref:`clientappresources` for more details." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:346 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." +"If your ``ClientApp`` is using TensorFlow, make sure you are exporting " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"`` before starting your simulation. For " +"more details, check." msgstr "" -"동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시작할 때 " -":code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." -#: ../../source/how-to-run-simulations.rst:235 -#, fuzzy +#: ../../source/how-to-run-simulations.rst msgid "" -"In addition, the GPU resource limits passed to ``client_resources`` are " -"not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"How do I know what's the right ``num_cpus`` and ``num_gpus`` for my " +"``ClientApp``?" msgstr "" -"또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, 초과할 수 있음) " -"클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 VRAM을 사용하는 상황이 발생할 수 있습니다." 
-#: ../../source/how-to-run-simulations.rst:240 -msgid "TensorFlow with GPUs" -msgstr "GPU를 사용한 TensorFlow" +#: ../../source/how-to-run-simulations.rst:350 +msgid "" +"A good practice is to start by running the simulation for a few rounds " +"with higher ``num_cpus`` and ``num_gpus`` than what is really needed " +"(e.g., ``num_cpus=8`` and, if you have a GPU, ``num_gpus=1``). Then " +"monitor your CPU and GPU utilization. For this, you can make use of tools" +" such as ``htop`` and ``nvidia-smi``. If you see overall resource " +"utilization remains low, try lowering ``num_cpus`` and ``num_gpus`` " +"(recall this will make more ``ClientApp`` instances run in parallel) " +"until you see a satisfactory system resource utilization." +msgstr "" -#: ../../source/how-to-run-simulations.rst:242 +#: ../../source/how-to-run-simulations.rst:352 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"Note that if the workload on your ``ClientApp`` instances is not " +"homogeneous (i.e., some come with a larger compute or memory footprint), " +"you'd probably want to focus on those when coming up with a good value " +"for ``num_gpus`` and ``num_cpus``." msgstr "" -"`TensorFlow와 함께 GPU를 사용 `_하면 프로세스에 " -"보이는 모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. 이는 최적화 목적으로 TensorFlow에서 수행됩니다. 그러나 " -"GPU를 여러 개의 '가상' 클라이언트로 분할하려는 FL 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 아닙니다. 다행히도 " -"'메모리 증가 활성화 " -"`_'를 통해 " -"이 기본 동작을 비활성화할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:249 -#, fuzzy +#: ../../source/how-to-run-simulations.rst +msgid "Can I assign different resources to each ``ClientApp`` instance?" 
+msgstr "" + +#: ../../source/how-to-run-simulations.rst:356 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " -"order to specify a function to be executed upon actor initialization. In " -"this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"No. All ``ClientApp`` objects are assumed to make use of the same " +"``num_cpus`` and ``num_gpus``. When setting these values (refer to " +":ref:`clientappresources` for more details), ensure the ``ClientApp`` " +"with the largest memory footprint (either RAM or VRAM) can run in your " +"system with others like it in parallel." msgstr "" -"이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행해야 합니다. " -":code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전달하여 액터 초기화 시 실행할 " -"함수를 지정할 수 있습니다. 이 경우 TF 워크로드에 대한 GPU 증가를 활성화합니다. 다음과 같이 보입니다:" -#: ../../source/how-to-run-simulations.rst:272 +#: ../../source/how-to-run-simulations.rst msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"Can I run single simulation accross multiple compute nodes (e.g. GPU " +"servers)?" msgstr "" -"이것이 바로`Tensorflow/Keras Simulation " -"`_ 예제에서 사용된 메커니즘입니다." -#: ../../source/how-to-run-simulations.rst:276 -msgid "Multi-node setups" -msgstr "멀티 노드 설정" +#: ../../source/how-to-run-simulations.rst:360 +msgid "" +"Yes. If you are using the ``RayBackend`` (the *default* backend) you can " +"first interconnect your nodes through Ray's cli and then launch the " +"simulation. Refer to :ref:`multinodesimulations` for a step-by-step " +"guide." +msgstr "" -#: ../../source/how-to-run-simulations.rst:278 +#: ../../source/how-to-run-simulations.rst msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. 
In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"My ``ServerApp`` also needs to make use of the GPU (e.g., to do " +"evaluation of the *global model* after aggregation). Is this GPU usage " +"taken into account by the ``Simulation Engine``?" msgstr "" -"VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 실행할지 제어하는 방법을 제공하지 않습니다. 즉, 클라이언트가 실행하는 데 " -"필요한 리소스가 하나 이상의 노드에 있는 경우 해당 노드 중 어느 노드에나 클라이언트 워크로드가 예약될 수 있습니다. FL 프로세스" -" 후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에 액세스하는 " -"방식에 따라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피하기 위해 데이터 세트 제공 메커니즘(예: " -"nfs, 데이터베이스 사용)을 사용해야 할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:286 +#: ../../source/how-to-run-simulations.rst:364 +msgid "" +"No. The ``Simulation Engine`` only manages ``ClientApps`` and therefore " +"is only aware of the system resources they require. If your ``ServerApp``" +" makes use of substantial compute or memory resources, factor that into " +"account when setting ``num_cpus`` and ``num_gpus``." +msgstr "" + +#: ../../source/how-to-run-simulations.rst +msgid "" +"Can I indicate on what resource a specific instance of a ``ClientApp`` " +"should run? Can I do resource placement?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:368 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. 
a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Currently, the placement of ``ClientApp`` instances is managed by the " +"``RayBackend`` (the only backend available as of ``flwr==1.13.0``) and " +"cannot be customized. Implementing a *custom* backend would be a way of " +"achieving resource placement." msgstr "" -"정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 없음'입니다. 클라이언트 상태는 Flower 클라이언트 클래스의 일부로 구현할" -" 수 있지만, 사용자는 이를 영구 저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 중인 노드와 관계없이 동일한 " -"클라이언트가 검색할 수 있도록 해야 합니다. 이는 어떤 식으로든 클라이언트의 데이터 세트가 일종의 '상태'로 볼 수 있기 때문에 " -"위의 요점과도 관련이 있습니다." #: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" +#, fuzzy +msgid "Save and Load Model Checkpoints" msgstr "모델 체크포인트 저장 및 로드" #: ../../source/how-to-save-and-load-model-checkpoints.rst:4 @@ -7919,7 +7320,8 @@ msgstr "" "저장(및 로드)하는 단계에 대해 설명합니다." #: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" +#, fuzzy +msgid "Model Checkpointing" msgstr "모델 체크포인트" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 @@ -7942,11 +7344,12 @@ msgstr "" ":code:`aggregate_fit`을 사용자 지정합니다. 그런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 " "반환된(집계된) 가중치를 계속 저장합니다:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 -msgid "Save and load PyTorch checkpoints" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:58 +#, fuzzy +msgid "Save and Load PyTorch Checkpoints" msgstr "파이토치 체크포인트 저장 및 로드" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:60 msgid "" "Similar to the previous example but with a few extra steps, we'll show " "how to store a PyTorch checkpoint we'll use the ``torch.save`` function. 
" @@ -7960,7 +7363,7 @@ msgstr "" "``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파이토치 " "``state_dict``로 변환됩니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:103 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" @@ -7969,17 +7372,38 @@ msgstr "" "진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 저장된 모든 체크포인트를 반복하고 최신 체크포인트를 " "로드합니다:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:116 msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." msgstr "``전략``을 정의할 때 ``초기_파라미터``와 같이 필요한 경우 ``파라미터`` 유형의 이 객체를 반환/사용합니다." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:119 +msgid "" +"Alternatively, we can save and load the model updates during evaluation " +"phase by overriding ``evaluate()`` or ``aggregate_evaluate()`` method of " +"the strategy (``FedAvg``). Checkout the details in `Advanced PyTorch " +"Example `_ and `Advanced TensorFlow Example " +"`_." +msgstr "" + #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" msgstr "Flower 1.0으로 업그레이드" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:6 +msgid "" +"This guide is for users who have already worked with Flower 0.x and want " +"to upgrade to Flower 1.0. Newer versions of Flower (1.13 and later) are " +"based on a new architecture and not covered in this guide. After " +"upgrading Flower 0.x projects to Flower 1.0, please refer to " +":doc:`Upgrade to Flower 1.13 ` to make " +"your project compatible with the lastest version of Flower." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:13 msgid "" "Flower 1.0 is here. 
Along with new features, Flower 1.0 provides a stable" " foundation for future growth. Compared to Flower 0.19 (and other 0.x " @@ -7990,22 +7414,22 @@ msgstr "" "Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 " "획기적인 변경 사항이 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 -#: ../../source/how-to-upgrade-to-flower-next.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:49 msgid "Install update" msgstr "업데이트 설치" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:21 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" msgstr "다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방법입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "pip: add ``-U`` when installing." msgstr "pip: 설치할 때 ``-U``를 추가합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:25 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" @@ -8013,13 +7437,13 @@ msgstr "" "``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 " "사용하는 경우)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" msgstr "``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " @@ -8029,11 +7453,11 @@ msgstr "" "설치하세요(``poetry 설치``를 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는" " 것을 잊지 마세요)." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:32 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" @@ -8041,26 +7465,26 @@ msgstr "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " "(``start_simulation`` 사용 시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 -#: ../../source/how-to-upgrade-to-flower-next.rst:121 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:88 msgid "Required changes" msgstr "필수 변경 사항" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "The following breaking changes require manual updates." msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:42 msgid "General" msgstr "일반" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:44 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). " "Here's an example:" msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 
다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" @@ -8068,7 +7492,7 @@ msgstr "" "Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " @@ -8077,12 +7501,12 @@ msgstr "" "Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "클라이언트" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" @@ -8090,7 +7514,7 @@ msgstr "" "``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " "get_parameters(self, config):``로 변경합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" @@ -8098,11 +7522,11 @@ msgstr "" "``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " "get_parameters(self, ins: GetParametersIns):``로 변경합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "전략 / ``start_server`` / ``start_simulation``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) 
to ``start_server`` and " "``start_simulation``. Here's an example:" @@ -8110,7 +7534,7 @@ msgstr "" "Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``에 " "전달합니다. 다음은 예제입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" @@ -8118,7 +7542,7 @@ msgstr "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " @@ -8128,7 +7552,7 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:70 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" @@ -8136,7 +7560,7 @@ msgstr "" "``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``로" " 바꿉니다(이전 항목 참조)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:72 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. Distributed evaluation on all clients can be enabled by" @@ -8147,19 +7571,19 @@ msgstr "" "클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 " "있습니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:75 msgid "Rename parameter/ndarray conversion functions:" msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:78 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -8174,23 +7598,23 @@ msgstr "" "호출하여)는 이제 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를" " 수동으로 초기화해야 합니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:92 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " @@ -8200,11 +7624,11 @@ msgstr "" " ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate`` 및 " "``evaluate_fn``)에 영향을 미칩니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:95 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:97 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8212,7 +7636,7 @@ msgstr "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:99 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " @@ -8222,11 +7646,11 @@ msgstr "" "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "Custom strategies" msgstr "사용자 정의 전략" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -8239,13 +7663,13 @@ msgstr "" "BaseException]]``(``aggregate_fit``에서) 및 ``List[Union[Tuple[ClientProxy]," " EvaluateRes], BaseException]]``(``aggregate_evaluate``)로 변경되었습니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "이제 ``Strategy`` 메서드 ``evaluate``는 현재 federated 학습/평가 라운드를 첫 번째 파라미터로 받습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:112 msgid "" "Flower 0.19: ``def 
evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8253,7 +7677,7 @@ msgstr "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8261,17 +7685,17 @@ msgstr "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:118 msgid "Optional improvements" msgstr "선택적 개선 사항" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:120 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" msgstr "위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " @@ -8281,7 +7705,7 @@ msgstr "" "``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제거합니다. 예를 들어" " 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시자 구현은 더 이상 필요하지 않습니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:126 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " @@ -8291,12 +7715,12 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 -#: ../../source/how-to-upgrade-to-flower-next.rst:349 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:130 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:451 msgid "Further help" msgstr "추가 도움말" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:132 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -8309,87 +7733,71 @@ msgstr "" "업데이트되어 있으며, Flower 1.0 API를 사용하기 위한 참고 자료로 사용할 수 있습니다. 더 궁금한 점이 있다면 ``플라워" " 슬랙 `_에 가입하여 ``#questions`` 채널을 이용하세요." -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" -msgstr "Flower Next 업그레이드" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:2 +#, fuzzy +msgid "Upgrade to Flower 1.13" +msgstr "Flower 1.0으로 업그레이드" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:4 +#, fuzzy msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " +"Welcome to the migration guide for updating Flower to Flower 1.13! " "Whether you're a seasoned user or just getting started, this guide will " "help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +" latest features and improvements in Flower 1.13." msgstr "" "Flower에서 Flower Next로의 업데이트를 위한 이동 가이드에 오신 것을 환영합니다! 이 가이드는 숙련된 사용자든 이제 막" " 시작한 사용자든 상관없이 기존 설정을 원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " "수 있도록 도와드립니다." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:11 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:10 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"This guide shows how to make pre-``1.13`` Flower code compatible with " +"Flower 1.13 (and later) with only minimal code changes." msgstr "" -"이 가이드에서는 Flower Next의 *호환성 레이어*를 사용하여 최소한의 코드 변경으로 ``1.8`` 이전의 Flower 코드를" -" 재사용하는 방법을 보여줍니다. 다른 가이드에서는 순수한 Flower Next API로 Flower Next를 end-to-end로" -" 실행하는 방법을 보여드리겠습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:13 msgid "Let's dive in!" msgstr "자세히 알아봅시다!" -#: ../../source/how-to-upgrade-to-flower-next.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:51 +#, fuzzy msgid "" -"Here's how to update an existing installation of Flower to Flower Next " +"Here's how to update an existing installation of Flower to Flower 1.13 " "with ``pip``:" msgstr "기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 같습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:74 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:57 +#, fuzzy +msgid "or if you need Flower 1.13 with simulation:" msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" -#: ../../source/how-to-upgrade-to-flower-next.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:63 msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" -#: ../../source/how-to-upgrade-to-flower-next.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:73 msgid "or ``pyproject.toml``:" msgstr "또는 ``pyproject.toml``:" -#: ../../source/how-to-upgrade-to-flower-next.rst:101 -msgid "Using Poetry" -msgstr "Poetry 사용" - -#: 
../../source/how-to-upgrade-to-flower-next.rst:103 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:90 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"Starting with Flower 1.8, the *infrastructure* and *application layers* " +"have been decoupled. Flower 1.13 enforces this separation further. Among " +"other things, this allows you to run the exact same code in a simulation " +"as in a real deployment." msgstr "" -"``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``poetry install``을 " -"실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는 것을 잊지 마세요)." -#: ../../source/how-to-upgrade-to-flower-next.rst:106 -msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:94 +#, fuzzy msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"Instead of starting a client in code via ``start_client()``, you create a" +" |clientapp_link|_. Instead of starting a server in code via " +"``start_server()``, you create a |serverapp_link|_. 
Both ``ClientApp`` " +"and ``ServerApp`` are started by the long-running components of the " +"server and client: the `SuperLink` and `SuperNode`, respectively." msgstr "" "Flower Next에서는 *infrastructure*와 *application layers*가 분리되었습니다. 코드에서 " "``start_client()``를 통해 클라이언트를 시작하는 대신, 명령줄을 통해 |clientapp_link|_를 생성하여 " @@ -8398,113 +7806,201 @@ msgstr "" "업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로젝트를 실행할 수 있는 non-breaking 변경 " "사항은 다음과 같습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:102 +msgid "" +"For more details on SuperLink and SuperNode, please see the " +"|flower_architecture_link|_ ." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:105 +msgid "" +"The following non-breaking changes require manual updates and allow you " +"to run your project both in the traditional (now deprecated) way and in " +"the new (recommended) Flower 1.13 way:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:110 msgid "|clientapp_link|_" msgstr "|clientapp_link|_" -#: ../../source/how-to-upgrade-to-flower-next.rst:134 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:112 +#, fuzzy msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"via ``start_client()``. Here's an example:" msgstr "" "|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 래핑하세요. 다음은 " "예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:146 msgid "|serverapp_link|_" msgstr "|serverapp_link|_" -#: ../../source/how-to-upgrade-to-flower-next.rst:159 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:148 +#, fuzzy msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"the server via ``start_server()``. 
Here's an example:" msgstr "" "서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략을 |serverapp_link|_로 " "래핑하세요. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:180 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:185 msgid "Deployment" msgstr "배포" -#: ../../source/how-to-upgrade-to-flower-next.rst:182 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:187 +#, fuzzy msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"In a terminal window, start the SuperLink using |flower_superlink_link|_." +" Then, in two additional terminal windows, start two SuperNodes using " +"|flower_supernode_link|_ (2x). There is no need to directly run " +"``client.py`` and ``server.py`` as Python scripts." msgstr "" "실행하기 전에 |flowernext_superlink_link|_를 사용하여 ``SuperLink``를 실행한 후 " "|flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 순서대로 " "실행합니다. 'client.py'와 'server.py'를 Python 스크립트로 실행할 필요는 없습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:185 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:190 +#, fuzzy msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"Here's an example to start the server without HTTPS (insecure mode, only " +"for prototyping):" msgstr "다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:195 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"For a comprehensive walk-through on how to deploy Flower using Docker, " +"please refer to the :doc:`docker/index` guide." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:218 +#, fuzzy +msgid "" +"Here's another example to start both SuperLink and SuperNodes with HTTPS." +" Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile``" +" command line options to pass paths to (CA certificate, server " +"certificate, and server private key)." msgstr "" "다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-certfile``, " "'`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 및 서버 개인 키)의 경로를 전달합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:229 -msgid "Simulation in CLI" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:246 +#, fuzzy +msgid "Simulation (CLI)" msgstr "CLI 시뮬레이션" -#: ../../source/how-to-upgrade-to-flower-next.rst:231 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:248 +#, fuzzy msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"|serverapp_link|_, respectively. There is no need to use " +"``start_simulation()`` anymore. Here's an example:" msgstr "" "기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하세요. 더 이상 " "|startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:264 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:253 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:389 +msgid "" +"For a comprehensive guide on how to setup and run Flower simulations " +"please read the |flower_how_to_run_simulations_link|_ guide." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:310 +msgid "Depending on your Flower version, you can run your simulation as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:312 +msgid "" +"For Flower 1.11 and later, run ``flwr run`` in the terminal. This is the " +"recommended way to start simulations, other ways are deprecated and no " +"longer recommended." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:314 +#, fuzzy msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"DEPRECATED For Flower versions between 1.8 and 1.10, run ``flower-" +"simulation`` in the terminal and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +" In the code snippet below, there is an example (assuming the " +"``server_app`` and ``client_app`` objects are in a ``sim.py`` module)." msgstr "" "CLI에서 |flower_simulation_link|_를 실행하고 Python 스크립트를 실행하는 대신 코드에서 " "``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다(``server_app`` 및 " "``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" -#: ../../source/how-to-upgrade-to-flower-next.rst:281 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:318 +msgid "DEPRECATED For Flower versions before 1.8, run the Python script directly." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:337 +msgid "" +"Depending on your Flower version, you can also define the default " +"resources as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:339 +msgid "" +"For Flower 1.11 and later, you can edit your ``pyproject.toml`` file and " +"then run ``flwr run`` in the terminal as shown in the example below." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:341 +#, fuzzy msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"DEPRECATED For Flower versions between 1.8 and 1.10, you can adjust the " +"resources for each |clientapp_link|_ using the ``--backend-config`` " +"command line argument instead of setting the ``client_resources`` " +"argument in ``start_simulation()``." 
msgstr "" "|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-config`` " "명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설정하세요. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:305 -msgid "Simulation in a Notebook" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:344 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:384 +msgid "" +"DEPRECATED For Flower versions before 1.8, you need to run " +"``start_simulation()`` and pass a dictionary of the required resources to" +" the ``client_resources`` argument." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:375 +#, fuzzy +msgid "Simulation (Notebook)" msgstr "Notebook에서 시뮬레이션" -#: ../../source/how-to-upgrade-to-flower-next.rst:307 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:377 +msgid "" +"To run your simulation from within a notebook, please consider the " +"following examples depending on your Flower version:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:380 +#, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"For Flower 1.11 and later, you need to run |runsim_link|_ in your " +"notebook instead of ``start_simulation()``." msgstr "notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:351 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:382 msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"DEPRECATED For Flower versions between 1.8 and 1.10, you need to run " +"|runsim_link|_ in your notebook instead of ``start_simulation()`` and " +"configure the resources." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:453 +#, fuzzy +msgid "" +"Most official `Flower code examples `_ " +"are already updated to Flower 1.13 so they can serve as a reference for " +"using the Flower 1.13 API. If there are further questions, `join the " +"Flower Slack `_ (and use the channel " +"``#questions``) or post them on `Flower Discuss " +"`_ where you can find the community posting " +"and answering questions." msgstr "" "일부 공식 ``Flower 코드 예제 `_는 이미 플라워 넥스트에 " "업데이트되어 있으므로 플라워 넥스트 API를 사용하는 데 참고할 수 있습니다. 더 궁금한 점이 있다면 ``플라워 슬랙 " @@ -8512,20 +8008,20 @@ msgstr "" "``Flower Discuss `_에 참여하여 질문에 대한 답변을 확인하거나 다른" " 사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:460 msgid "Important" msgstr "중요" -#: ../../source/how-to-upgrade-to-flower-next.rst:360 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:462 +#, fuzzy msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"As we continuously enhance Flower at a rapid pace, we'll be periodically " +"updating this guide. Please feel free to share any feedback with us!" msgstr "" "Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " "언제든지 공유해 주세요!" -#: ../../source/how-to-upgrade-to-flower-next.rst:366 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:465 msgid "Happy migrating! 🚀" msgstr "행복한 마이그레이션! 
🚀" @@ -8799,7 +8295,7 @@ msgstr "" "서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`이 일치하는 " ":code:`fixedclipping_mod`를 구성해야 합니다:" -#: ../../source/how-to-use-differential-privacy.rst:115 +#: ../../source/how-to-use-differential-privacy.rst:116 msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " @@ -8813,12 +8309,12 @@ msgstr "" msgid "local DP mod" msgstr "로컬 DP mod" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:126 #, fuzzy msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" -#: ../../source/how-to-use-differential-privacy.rst:140 +#: ../../source/how-to-use-differential-privacy.rst:144 msgid "" "Please note that the order of mods, especially those that modify " "parameters, is important when using multiple modifiers. Typically, " @@ -8828,19 +8324,20 @@ msgstr "" "여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서가 중요하다는 점에 유의하세요. 일반적으로 차분 " "프라이버시(DP) 수정자는 매개변수에서 가장 마지막에 작동해야 합니다." -#: ../../source/how-to-use-differential-privacy.rst:145 +#: ../../source/how-to-use-differential-privacy.rst:149 msgid "Local Training using Privacy Engines" msgstr "Privacy Engines을 사용한 로컬 훈련" -#: ../../source/how-to-use-differential-privacy.rst:147 +#: ../../source/how-to-use-differential-privacy.rst:151 +#, fuzzy msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " "TensorFlow Privacy. For examples of using Flower with these engines, " "please refer to the Flower examples directory (`Opacus " "`_, `Tensorflow" -" Privacy `_)." +" Privacy `_)." msgstr "" "클라이언트 측에서 로컬 모델을 훈련하는 동안 데이터 인스턴스 수준의 개인 정보 보호를 보장하려면 Opacus 및 TensorFlow" " Privacy와 같은 개인 정보 보호 엔진을 활용하는 것을 고려하세요. 
이러한 엔진과 함께 Flower를 사용하는 예제는 " @@ -8875,12 +8372,12 @@ msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다" #: ../../source/how-to-use-strategies.rst:11 -#: ../../source/how-to-use-strategies.rst:43 +#: ../../source/how-to-use-strategies.rst:66 msgid "Customize an existing strategy with callback functions" msgstr "콜백 함수로 기존 전략 사용자 지정" #: ../../source/how-to-use-strategies.rst:12 -#: ../../source/how-to-use-strategies.rst:99 +#: ../../source/how-to-use-strategies.rst:139 msgid "Implement a novel strategy" msgstr "새로운 전략 구현" @@ -8889,41 +8386,51 @@ msgid "Use an existing strategy" msgstr "기존 전략 사용" #: ../../source/how-to-use-strategies.rst:17 +#, fuzzy msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Flower comes with a number of popular federated learning Strategies which" +" can be instantiated as follows:" msgstr "Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 제공 전략은 다음과 같이 인스턴스화할 수 있습니다:" -#: ../../source/how-to-use-strategies.rst:27 -#, fuzzy +#: ../../source/how-to-use-strategies.rst:45 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the ``start_server`` function. It is usually recommended" -" to adjust a few parameters during instantiation:" +"To make the ``ServerApp`` use this strategy, pass a ``server_fn`` " +"function to the ``ServerApp`` constructor. The ``server_fn`` function " +"should return a ``ServerAppComponents`` object that contains the strategy" +" instance and a ``ServerConfig`` instance." msgstr "" -"이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:`start_server` 함수에 전달됩니다. 일반적으로 " -"인스턴스화 중에 몇 가지 매개변수를 조정하는 것이 좋습니다:" -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:50 +msgid "" +"Both ``Strategy`` and ``ServerConfig`` classes can be configured with " +"parameters. 
The ``Context`` object passed to ``server_fn`` contains the " +"values specified in the ``[tool.flwr.app.config]`` table in your " +"``pyproject.toml`` (a snippet is shown below). To access these values, " +"use ``context.run_config``." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:68 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " +"Existing strategies provide several ways to customize their behavior. " "Callback functions allow strategies to call user-provided code during " -"execution." +"execution. This approach enables you to modify the strategy's partial " +"behavior without rewriting the whole class from zero." msgstr "" "기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 사용하면 전략이 실행 중에 사용자가 제공한 코드를 " "호출할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:49 +#: ../../source/how-to-use-strategies.rst:73 msgid "Configuring client fit and client evaluate" msgstr "클라이언트 적합성 및 클라이언트 평가 구성" -#: ../../source/how-to-use-strategies.rst:51 +#: ../../source/how-to-use-strategies.rst:75 #, fuzzy msgid "" "The server can pass new configuration values to the client each round by " "providing a function to ``on_fit_config_fn``. The provided function will " "be called by the strategy and must return a dictionary of configuration " -"key values pairs that will be sent to the client. It must return a " +"key value pairs that will be sent to the client. It must return a " "dictionary of arbitrary configuration values ``client.fit`` and " "``client.evaluate`` functions during each round of federated learning." msgstr "" @@ -8932,20 +8439,23 @@ msgstr "" "학습의 각 라운드 동안 임의의 구성 값 dictionary인 :code:`client.fit` 및 " ":code:`client.evaluate` 함수를 반환해야 합니다." 
-#: ../../source/how-to-use-strategies.rst:84 +#: ../../source/how-to-use-strategies.rst:121 #, fuzzy msgid "" "The ``on_fit_config_fn`` can be used to pass arbitrary configuration " -"values from server to client, and potentially change these values each " +"values from server to client and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " "the dictionary returned by the ``on_fit_config_fn`` in its own " -"``client.fit()`` function." +"``client.fit()`` function. And while the values can be also passed " +"directly via the context this function can be a place to implement finer " +"control over the `fit` behaviour that may not be achieved by the context," +" which sets fixed values." msgstr "" ":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, 예를 들어 학습 속도를 조정하기 " "위해 매 라운드마다 이 값을 잠재적으로 변경하는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` " "함수에서 :code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:129 #, fuzzy msgid "" "Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " @@ -8954,18 +8464,18 @@ msgstr "" ":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구성을 사용자 " "지정하는 :code:`on_evaluate_config_fn`도 있습니다" -#: ../../source/how-to-use-strategies.rst:93 +#: ../../source/how-to-use-strategies.rst:133 msgid "Configuring server-side evaluation" msgstr "서버 측 평가 구성" -#: ../../source/how-to-use-strategies.rst:95 +#: ../../source/how-to-use-strategies.rst:135 #, fuzzy msgid "" "Server-side evaluation can be enabled by passing an evaluation function " "to ``evaluate_fn``." msgstr "서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:101 +#: ../../source/how-to-use-strategies.rst:141 msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " "the most flexibility. 
Read the `Implementing Strategies `_ is a " "friendly federated learning framework." @@ -9110,33 +8617,33 @@ msgid "" "specific goal." msgstr "문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." -#: ../../source/index.rst:116 +#: ../../source/index.rst:109 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 설명하고 토론합니다." -#: ../../source/index.rst:128 +#: ../../source/index.rst:121 msgid "References" msgstr "참조" -#: ../../source/index.rst:130 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "정보 지향 API 참조 및 기타 참고 자료." -#: ../../source/index.rst:139::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr ":py:obj:`flwr `\\" -#: ../../source/index.rst:139::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "Flower 메인 패키지." -#: ../../source/index.rst:155 +#: ../../source/index.rst:148 msgid "Contributor docs" msgstr "기여자 문서" -#: ../../source/index.rst:157 +#: ../../source/index.rst:150 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -9146,9 +8653,13 @@ msgstr "Flower 커뮤니티는 여러분의 기여를 환영합니다. 다음 msgid "Flower CLI reference" msgstr "Flower CLI 참조" -#: ../../source/ref-api-cli.rst:7 +#: ../../source/ref-api-cli.rst:5 +msgid "Basic Commands" +msgstr "" + +#: ../../source/ref-api-cli.rst:10 #, fuzzy -msgid "flwr CLI" +msgid "``flwr`` CLI" msgstr "Flower 클라이언트." 
#: ../../flwr:1 @@ -9240,7 +8751,7 @@ msgstr "" msgid "Arguments" msgstr "빌드 전달인자" -#: ../../flwr install:1 log:1 new:1 run:1 +#: ../../flwr install:1 log:1 ls:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "선택적 개선 사항" @@ -9257,7 +8768,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log run +#: ../../flwr log ls run msgid "default" msgstr "" @@ -9284,10 +8795,36 @@ msgstr "" msgid "Name of the federation to run the app on" msgstr "" +#: ../../flwr ls:1 +msgid "List runs." +msgstr "" + +#: ../../flwr ls:1 +msgid "List all runs" +msgstr "" + +#: ../../flwr ls:1 run:1 +#, fuzzy +msgid "``False``" +msgstr "``flwr/base``" + +#: ../../flwr ls:1 +msgid "Specific run ID to display" +msgstr "" + +#: ../../flwr ls:1 +#, fuzzy +msgid "Path of the Flower project" +msgstr "Flower 기본 이미지의 태그." + +#: ../../flwr ls:1 +msgid "Name of the federation" +msgstr "" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." -msgstr "Flower 서버를 실행하세요." +msgstr "새 페이지 만들기" #: ../../flwr new:1 msgid "The ML framework to use" @@ -9316,7 +8853,7 @@ msgstr "Flower 기본 이미지의 태그." #: ../../flwr run:1 #, fuzzy msgid "Run Flower App." -msgstr "Flower 서버를 실행하세요." +msgstr "Flower 서버." #: ../../flwr run:1 msgid "Override configuration key-value pairs, should be of the format:" @@ -9340,11 +8877,6 @@ msgid "" "default." msgstr "" -#: ../../flwr run:1 -#, fuzzy -msgid "``False``" -msgstr "``flwr/base``" - #: ../../flwr run:1 #, fuzzy msgid "Path of the Flower App to run." @@ -9354,37 +8886,66 @@ msgstr "Flower 기본 이미지의 태그." msgid "Name of the federation to run the app on." 
msgstr "" -#: ../../source/ref-api-cli.rst:16 -msgid "flower-simulation" -msgstr "flower 시뮬레이션" - -#: ../../source/ref-api-cli.rst:26 -msgid "flower-superlink" +#: ../../source/ref-api-cli.rst:19 +#, fuzzy +msgid "``flower-superlink``" msgstr "flower 초연결" -#: ../../source/ref-api-cli.rst:36 +#: ../../source/ref-api-cli.rst:29 #, fuzzy -msgid "flower-supernode" +msgid "``flower-supernode``" msgstr "Flower SuperNode" -#: ../../source/ref-api-cli.rst:46 -msgid "flower-server-app" +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "Advanced Commands" +msgstr "고급 Docker 옵션" + +#: ../../source/ref-api-cli.rst:42 +#, fuzzy +msgid "``flwr-serverapp``" +msgstr "flower 서버 프로그램" + +#: ../../source/ref-api-cli.rst:52 +#, fuzzy +msgid "``flwr-clientapp``" +msgstr "Flower ClientApp." + +#: ../../source/ref-api-cli.rst:60 +msgid "Technical Commands" +msgstr "" + +#: ../../source/ref-api-cli.rst:65 +#, fuzzy +msgid "``flower-simulation``" +msgstr "flower 시뮬레이션" + +#: ../../source/ref-api-cli.rst:73 +msgid "Deprecated Commands" +msgstr "" + +#: ../../source/ref-api-cli.rst:78 +#, fuzzy +msgid "``flower-server-app``" msgstr "flower 서버 프로그램" -#: ../../source/ref-api-cli.rst:50 +#: ../../source/ref-api-cli.rst:82 msgid "" -"Note that since version ``1.11.0``, ``flower-server-app`` no longer " -"supports passing a reference to a `ServerApp` attribute. Instead, you " -"need to pass the path to Flower app via the argument ``--app``. This is " -"the path to a directory containing a `pyproject.toml`. You can create a " -"valid Flower app by executing ``flwr new`` and following the prompt." +"Note that from version ``1.13.0``, ``flower-server-app`` is deprecated. " +"Instead, you only need to execute |flwr_run_link|_ to start the run." 
msgstr "" -#: ../../source/ref-api-cli.rst:64 +#: ../../source/ref-api-cli.rst:88 #, fuzzy -msgid "flower-superexec" +msgid "``flower-superexec``" msgstr "flower 초연결" +#: ../../source/ref-api-cli.rst:92 +msgid "" +"Note that from version ``1.13.0``, ``flower-superexec`` is deprecated. " +"Instead, you only need to execute |flower_superlink_link|_." +msgstr "" + #: ../../source/ref-api/flwr.rst:2 msgid "flwr" msgstr "flwr" @@ -9476,6 +9037,7 @@ msgstr "gRPC 서버에 연결되는 Flower NumPyClient를 시작합니다." #: ../../source/ref-api/flwr.server.rst:24 #: ../../source/ref-api/flwr.server.strategy.rst:17 #: ../../source/ref-api/flwr.server.workflow.rst:17 +#: ../../source/ref-api/flwr.simulation.rst:26 msgid "Classes" msgstr "클래스" @@ -9592,6 +9154,7 @@ msgstr "Bases: :py:class:`~abc.ABC`" #: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:15 msgid "Methods" msgstr "메소드" @@ -9692,7 +9255,7 @@ msgstr "클라이언트(자체)를 반환합니다." 
#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.Driver.rst:43 #: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 @@ -9736,6 +9299,7 @@ msgstr "" #: flwr.server.driver.driver.Driver.pull_messages #: flwr.server.driver.driver.Driver.push_messages #: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.driver.driver.Driver.set_run #: flwr.server.serverapp_components.ServerAppComponents #: flwr.server.strategy.bulyan.Bulyan #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping @@ -9759,7 +9323,8 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.run_simulation.run_simulation of +#: flwr.simulation.run_simulation.run_simulation +#: flwr.simulation.simulationio_connection.SimulationIoConnection of msgid "Parameters" msgstr "파라미터" @@ -9777,6 +9342,7 @@ msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로 #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -9809,6 +9375,7 @@ msgstr "로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이 #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: 
flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -9863,10 +9430,6 @@ msgstr "구성 값 dictionary이 포함된 서버로부터 받은 속성 가져 msgid "The current client properties." msgstr "현재 클라이언트 속성입니다." -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" -msgstr "클라이언트앱" - #: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 #: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 #: flwr.common.context.Context:1 flwr.common.message.Error:1 @@ -9885,11 +9448,11 @@ msgstr "클라이언트앱" #: flwr.server.serverapp_components.ServerAppComponents:1 #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of msgid "Bases: :py:class:`object`" msgstr "Bases: :py:class:`object`" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.app.start_client:51 flwr.client.app.start_numpy_client:36 #: flwr.client.client_app.ClientApp:4 #: flwr.client.client_app.ClientApp.evaluate:4 #: flwr.client.client_app.ClientApp.query:4 @@ -9898,7 +9461,7 @@ msgstr "Bases: :py:class:`object`" #: flwr.common.record.configsrecord.ConfigsRecord:20 #: flwr.common.record.metricsrecord.MetricsRecord:19 #: flwr.common.record.parametersrecord.ParametersRecord:22 -#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:46 #: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 @@ -10394,7 
+9957,13 @@ msgstr "secaggplus\\_mod" msgid "start\\_client" msgstr "start\\_client" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: flwr.client.app.start_client:5 of +msgid "" +"This function is deprecated since 1.13.0. Use :code:`flower-supernode` " +"command instead to start a SuperNode." +msgstr "" + +#: flwr.client.app.start_client:8 flwr.client.app.start_numpy_client:9 of msgid "" "The IPv4 or IPv6 address of the server. If the Flower server runs on the " "same machine on port 8080, then `server_address` would be " @@ -10403,17 +9972,17 @@ msgstr "" "서버의 IPv4 또는 IPv6 주소입니다. Flower 서버가 포트 8080의 동일한 컴퓨터에서 실행되는 경우 `서버_주소`는 " "`\"[::]:8080\"`이 됩니다." -#: flwr.client.app.start_client:7 of +#: flwr.client.app.start_client:12 of msgid "A callable that instantiates a Client. (default: None)" msgstr "클라이언트를 인스턴스화하는 호출 가능 항목입니다. (기본값: None)" -#: flwr.client.app.start_client:9 of +#: flwr.client.app.start_client:14 of msgid "" "An implementation of the abstract base class `flwr.client.Client` " "(default: None)" msgstr "추상 베이스 클래스 `flwr.client.Client`의 구현(기본값: None)" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: flwr.client.app.start_client:17 flwr.client.app.start_numpy_client:15 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " server. The default should be sufficient for most models. Users who " @@ -10426,7 +9995,7 @@ msgstr "" "훈련하는 사용자는 이 값을 늘려야 할 수도 있습니다. Flower 서버는 동일한 값으로 시작해야 " "하며(`flwr.server.start_server` 참조), 그렇지 않으면 증가된 제한을 알지 못해 더 큰 메시지를 차단합니다." -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: flwr.client.app.start_client:24 flwr.client.app.start_numpy_client:22 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -10435,7 +10004,7 @@ msgstr "" "바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 
제공하면 인증서를 사용하여 SSL이 활성화된 Flower 서버에 " "보안 연결이 설정됩니다." -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: flwr.client.app.start_client:28 flwr.client.app.start_numpy_client:26 of msgid "" "Starts an insecure gRPC connection when True. Enables HTTPS connection " "when False, using system certificates if `root_certificates` is None." @@ -10443,7 +10012,7 @@ msgstr "" "True일 경우 안전하지 않은 gRPC 연결을 시작합니다. root_certificates`가 None인 경우 시스템 인증서를 " "사용하여 False일 때 HTTPS 연결을 활성화합니다." -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: flwr.client.app.start_client:31 flwr.client.app.start_numpy_client:29 of msgid "" "Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " "bidirectional streaming - 'grpc-rere': gRPC, request-response " @@ -10452,7 +10021,15 @@ msgstr "" "전송 계층을 구성합니다. 허용되는 값입니다: - 'grpc-bidi': gRPC, 양방향 스트리밍 - 'grpc-rere': " "gRPC, 요청-응답(실험적) - 'rest': HTTP(실험적)" -#: flwr.client.app.start_client:31 of +#: flwr.client.app.start_client:36 of +msgid "" +"Tuple containing the elliptic curve private key and public key for " +"authentication from the cryptography library. Source: " +"https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ Used " +"to establish an authenticated connection with the server." +msgstr "" + +#: flwr.client.app.start_client:41 of msgid "" "The maximum number of times the client will try to connect to the server " "before giving up in case of a connection error. If set to None, there is " @@ -10461,7 +10038,7 @@ msgstr "" "연결 오류 발생 시 클라이언트가 서버 연결을 포기하기 전에 시도하는 최대 횟수입니다. None으로 설정하면 시도 횟수에 제한이 " "없습니다." -#: flwr.client.app.start_client:35 of +#: flwr.client.app.start_client:45 of msgid "" "The maximum duration before the client stops trying to connect to the " "server in case of connection error. If set to None, there is no limit to " @@ -10470,15 +10047,15 @@ msgstr "" "연결 오류 발생 시 클라이언트가 서버에 대한 연결을 시도하지 않는 최대 기간입니다. None으로 설정하면 총 시간에는 제한이 " "없습니다." 
-#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "안전하지 않은 서버 연결로 gRPC 클라이언트 시작하기:" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:59 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "시스템 인증서를 사용하여 SSL 사용 gRPC 클라이언트를 시작합니다:" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:70 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "제공된 인증서를 사용하여 SSL 지원 gRPC 클라이언트를 시작합니다:" @@ -10677,8 +10254,8 @@ msgstr "레코드를 설정합니다." #: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`Context `\\ \\(node\\_id\\, " -"node\\_config\\, state\\, run\\_config\\)" +":py:obj:`Context `\\ \\(run\\_id\\, node\\_id\\, " +"node\\_config\\, state\\, ...\\)" msgstr ":py:obj:`Context `\\ \\(state\\)" #: ../../source/ref-api/flwr.common.rst:68::1 @@ -11258,19 +10835,25 @@ msgstr "컨텍스트" #: flwr.common.context.Context:3 of #, fuzzy -msgid "The ID that identifies the node." +msgid "The ID that identifies the run." msgstr "오류 식별자입니다." #: flwr.common.context.Context:5 of +#, fuzzy +msgid "The ID that identifies the node." +msgstr "오류 식별자입니다." + +#: flwr.common.context.Context:7 of msgid "" "A config (key/value mapping) unique to the node and independent of the " "`run_config`. This config persists across all runs this node participates" " in." msgstr "" -#: flwr.common.context.Context:8 of +#: flwr.common.context.Context:10 of +#, fuzzy msgid "" -"Holds records added by the entity in a given run and that will stay " +"Holds records added by the entity in a given `run_id` and that will stay " "local. 
This means that the data it holds will never leave the system it's" " running from. This can be used as an intermediate storage or scratchpad " "when executing mods. It can also be used as a memory to access at " @@ -11281,28 +10864,33 @@ msgstr "" "모드를 실행할 때 중간 저장소나 스크래치 패드로 사용할 수 있습니다. 또한 이 엔티티의 수명 주기 동안 다른 시점에서 액세스하기 " "위한 메모리로도 사용할 수 있습니다(예: 여러 라운드에 걸쳐)" -#: flwr.common.context.Context:15 of +#: flwr.common.context.Context:17 of msgid "" -"A config (key/value mapping) held by the entity in a given run and that " -"will stay local. It can be used at any point during the lifecycle of this" -" entity (e.g. across multiple rounds)" +"A config (key/value mapping) held by the entity in a given `run_id` and " +"that will stay local. It can be used at any point during the lifecycle of" +" this entity (e.g. across multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`node_id `\\" msgstr ":py:obj:`src_node_id `\\" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`node_config `\\" msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 msgid ":py:obj:`state `\\" msgstr ":py:obj:`state `\\" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`run_config `\\" msgstr ":py:obj:`config `\\" @@ -11903,22 +11491,6 @@ msgstr "" ":py:obj:`START_SIMULATION_LEAVE " "`\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " 
-"`\\" -msgstr "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" - #: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" @@ -12556,6 +12128,10 @@ msgstr "" msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.common.message.Message.create_error_reply:12 of +msgid "**message** -- A Message containing only the relevant error and metadata." +msgstr "" + #: flwr.common.message.Message.create_reply:3 of msgid "" "The method generates a new `Message` as a reply to this message. It " @@ -12601,6 +12177,10 @@ msgstr ":py:obj:`GET_PARAMETERS `\ msgid ":py:obj:`GET_PROPERTIES `\\" msgstr ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Metadata" + #: flwr.common.Metadata.created_at:1::1 #: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of msgid "An identifier for the current run." @@ -13191,7 +12771,8 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:37::1 #: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +#, fuzzy +msgid "Abstract base Driver class for the ServerAppIo API." msgstr "Driver API를 위한 Abstract base Driver class." #: ../../source/ref-api/flwr.server.rst:37::1 @@ -13365,6 +12946,11 @@ msgstr "적어도 1개의 `num_clients` 가 사용 가능해질 때까지 기다 msgid "**num_available** -- The number of currently available clients." msgstr "" +#: flwr.server.client_manager.ClientManager.register:3 of +#, fuzzy +msgid "The ClientProxy of the Client to register." +msgstr "클라이언트 모델의 민감도입니다." + #: flwr.server.client_manager.ClientManager.register:6 #: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" @@ -13378,63 +12964,78 @@ msgstr "" msgid "This method is idempotent." msgstr "" +#: flwr.server.client_manager.ClientManager.unregister:5 of +#, fuzzy +msgid "The ClientProxy of the Client to unregister." +msgstr "클라이언트 모델의 민감도입니다." 
+ #: ../../source/ref-api/flwr.server.Driver.rst:2 msgid "Driver" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`create_message `\\ " "\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.create_message:1 of msgid "Create a new message with specified parameters." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.get_node_ids:1 of msgid "Get node IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`pull_messages `\\ " "\\(message\\_ids\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.pull_messages:1 of msgid "Pull messages based on message IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`push_messages `\\ " "\\(messages\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.push_messages:1 of msgid "Push messages to specified node IDs." 
msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`send_and_receive `\\ " "\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.send_and_receive:1 of msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#, fuzzy +msgid ":py:obj:`set_run `\\ \\(run\\_id\\)" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#: flwr.server.driver.driver.Driver.set_run:1 of +msgid "Request a run to the SuperLink with a given `run_id`." +msgstr "" + #: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy msgid ":py:obj:`run `\\" @@ -13546,6 +13147,17 @@ msgid "" "which is not affected by `timeout`." msgstr "" +#: flwr.server.driver.driver.Driver.set_run:3 of +msgid "" +"If a Run with the specified `run_id` exists, a local Run object will be " +"created. It enables further functionality in the driver, such as sending " +"`Messages`." +msgstr "" + +#: flwr.server.driver.driver.Driver.set_run:7 of +msgid "The `run_id` of the Run this Driver object operates in." 
+msgstr "" + #: ../../source/ref-api/flwr.server.History.rst:2 msgid "History" msgstr "" @@ -13618,37 +13230,42 @@ msgstr "" msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`strategy `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`client_manager `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`history `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`node_id `\\" msgstr ":py:obj:`src_node_id `\\" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`node_config `\\" msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`state `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`run_config `\\" msgstr ":py:obj:`config `\\" @@ -13728,10 +13345,6 @@ msgstr "" msgid "Replace server strategy." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" -msgstr "" - #: flwr.server.server_app.ServerApp:5 of msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "" @@ -13759,7 +13372,7 @@ msgid "" "thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.app.start_server:9 +#: flwr.server.app.start_server:14 #: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " @@ -13886,31 +13499,37 @@ msgstr "" msgid "start\\_server" msgstr "" -#: flwr.server.app.start_server:3 of +#: flwr.server.app.start_server:5 of +msgid "" +"This function is deprecated since 1.13.0. Use the :code:`flower-" +"superlink` command instead to start a SuperLink." +msgstr "" + +#: flwr.server.app.start_server:8 of msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.server.app.start_server:10 of msgid "" "A server implementation, either `flwr.server.Server` or a subclass " "thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.server.app.start_server:17 of msgid "" "An implementation of the abstract base class " "`flwr.server.strategy.Strategy`. If no strategy is provided, then " "`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.server.app.start_server:21 of msgid "" "An implementation of the abstract base class `flwr.server.ClientManager`." " If no implementation is provided, then `start_server` will use " "`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.server.app.start_server:26 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " clients. The default should be sufficient for most models. 
Users who " @@ -13920,7 +13539,7 @@ msgid "" "increased limit and block larger messages." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " @@ -13928,34 +13547,34 @@ msgid "" "server certificate. * server private key." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " "bytes elements in the following order:" msgstr "" -#: flwr.server.app.start_server:32 of +#: flwr.server.app.start_server:37 of msgid "CA certificate." msgstr "" -#: flwr.server.app.start_server:33 of +#: flwr.server.app.start_server:38 of msgid "server certificate." msgstr "" -#: flwr.server.app.start_server:34 of +#: flwr.server.app.start_server:39 of msgid "server private key." msgstr "" -#: flwr.server.app.start_server:37 of +#: flwr.server.app.start_server:42 of msgid "**hist** -- Object containing training and evaluation metrics." 
msgstr "" -#: flwr.server.app.start_server:42 of +#: flwr.server.app.start_server:47 of msgid "Starting an insecure server:" msgstr "" -#: flwr.server.app.start_server:46 of +#: flwr.server.app.start_server:51 of msgid "Starting an SSL-enabled server:" msgstr "" @@ -15242,7 +14861,7 @@ msgid "" msgstr "" #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:1231 +#: ../../source/ref-changelog.md:1434 msgid "FedAdagrad" msgstr "" @@ -16895,18 +16514,34 @@ msgstr "" msgid "simulation" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 msgid "" ":py:obj:`run_simulation `\\ " "\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.run_simulation.run_simulation:1 of msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#, fuzzy +msgid "" +":py:obj:`run_simulation_process " +"`\\ \\(...\\[\\, flwr\\_dir\\_\\," +" ...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#: flwr.simulation.app.run_simulation_process:1 of +#, fuzzy +msgid "Run Flower Simulation process." +msgstr "Flower 시뮬레이션." + +#: ../../source/ref-api/flwr.simulation.rst:24::1 #, fuzzy msgid "" ":py:obj:`start_simulation `\\ " @@ -16915,11 +16550,44 @@ msgstr "" ":py:obj:`start_client `\\ \\(\\*\\, " "server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.start_simulation:1 of msgid "Log error stating that module `ray` could not be imported." 
msgstr "" +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#, fuzzy +msgid "" +":py:obj:`SimulationIoConnection " +"`\\ \\(\\[...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of +msgid "`SimulationIoConnection` provides an interface to the SimulationIo API." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:2 +#, fuzzy +msgid "SimulationIoConnection" +msgstr "CLI 시뮬레이션" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:3 of +msgid "The address (URL, IPv6, IPv4) of the SuperLink SimulationIo API service." +msgstr "" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:5 of +#, fuzzy +msgid "" +"The PEM-encoded root certificates as a byte string. If provided, a secure" +" connection using the certificates will be established to an SSL-enabled " +"Flower server." +msgstr "" +"바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 제공하면 인증서를 사용하여 SSL이 활성화된 Flower 서버에 " +"보안 연결이 설정됩니다." + #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 msgid "run\\_simulation" msgstr "" @@ -16974,6 +16642,11 @@ msgid "" "If enabled, DEBUG-level logs will be displayed." 
msgstr "" +#: ../../source/ref-api/flwr.simulation.run_simulation_process.rst:2 +#, fuzzy +msgid "run\\_simulation\\_process" +msgstr "시뮬레이션 실행" + #: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 msgid "start\\_simulation" msgstr "" @@ -16983,25 +16656,27 @@ msgid "Changelog" msgstr "" #: ../../source/ref-changelog.md:3 -msgid "v1.11.1 (2024-09-11)" +msgid "v1.13.1 (2024-11-26)" msgstr "" #: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 -#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 -#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 -#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 -#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 -#: ../../source/ref-changelog.md:940 +#: ../../source/ref-changelog.md:138 ../../source/ref-changelog.md:208 +#: ../../source/ref-changelog.md:240 ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:442 ../../source/ref-changelog.md:542 +#: ../../source/ref-changelog.md:606 ../../source/ref-changelog.md:699 +#: ../../source/ref-changelog.md:799 ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:1074 ../../source/ref-changelog.md:1143 msgid "Thanks to our contributors" msgstr "" #: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 -#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 -#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 -#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:140 ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:242 ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:444 
../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:608 ../../source/ref-changelog.md:701 +#: ../../source/ref-changelog.md:801 ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:949 ../../source/ref-changelog.md:1007 msgid "" "We would like to give our special thanks to all the contributors who made" " the new version of Flower possible (in `git shortlog` order):" @@ -17009,6819 +16684,6990 @@ msgstr "" #: ../../source/ref-changelog.md:9 msgid "" -"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " -"`Javier`, `Robert Steiner`, `Yan Gao` " +"`Adam Narozniak`, `Charles Beauville`, `Heng Pan`, `Javier`, `Robert " +"Steiner` " +msgstr "" + +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:144 ../../source/ref-changelog.md:246 +#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:448 +#: ../../source/ref-changelog.md:548 ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:705 ../../source/ref-changelog.md:805 +#: ../../source/ref-changelog.md:889 ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:1011 ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1209 ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1318 ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1429 ../../source/ref-changelog.md:1468 +#: ../../source/ref-changelog.md:1501 ../../source/ref-changelog.md:1551 +msgid "What's new?" 
msgstr "" -#: ../../source/ref-changelog.md:11 -#, fuzzy -msgid "Improvements" -msgstr "선택적 개선 사항" - #: ../../source/ref-changelog.md:13 msgid "" -"**Implement** `keys/values/items` **methods for** `TypedDict` " -"([#4146](https://github.com/adap/flower/pull/4146))" +"**Fix `SimulationEngine` Executor for SuperLink** " +"([#4563](https://github.com/adap/flower/pull/4563), " +"[#4568](https://github.com/adap/flower/pull/4568), " +"[#4570](https://github.com/adap/flower/pull/4570))" msgstr "" #: ../../source/ref-changelog.md:15 msgid "" -"**Fix parsing of** `--executor-config` **if present** " -"([#4125](https://github.com/adap/flower/pull/4125))" +"Resolved an issue that prevented SuperLink from functioning correctly " +"when using the `SimulationEngine` executor." msgstr "" #: ../../source/ref-changelog.md:17 msgid "" -"**Adjust framework name in templates docstrings** " -"([#4127](https://github.com/adap/flower/pull/4127))" +"**Improve FAB build and install** " +"([#4571](https://github.com/adap/flower/pull/4571))" msgstr "" #: ../../source/ref-changelog.md:19 msgid "" -"**Update** `flwr new` **Hugging Face template** " -"([#4169](https://github.com/adap/flower/pull/4169))" +"An updated FAB build and install process produces smaller FAB files and " +"doesn't rely on `pip install` any more. It also resolves an issue where " +"all files were unnecessarily included in the FAB file. The `flwr` CLI " +"commands now correctly pack only the necessary files, such as `.md`, " +"`.toml` and `.py`, ensuring more efficient and accurate packaging." 
msgstr "" #: ../../source/ref-changelog.md:21 msgid "" -"**Fix** `flwr new` **FlowerTune template** " -"([#4123](https://github.com/adap/flower/pull/4123))" +"**Update** `embedded-devices` **example** " +"([#4381](https://github.com/adap/flower/pull/4381))" msgstr "" #: ../../source/ref-changelog.md:23 -msgid "" -"**Add buffer time after** `ServerApp` **thread initialization** " -"([#4119](https://github.com/adap/flower/pull/4119))" +msgid "The example now uses the `flwr run` command and the Deployment Engine." msgstr "" #: ../../source/ref-changelog.md:25 msgid "" -"**Handle unsuitable resources for simulation** " -"([#4143](https://github.com/adap/flower/pull/4143))" +"**Update Documentation** " +"([#4566](https://github.com/adap/flower/pull/4566), " +"[#4569](https://github.com/adap/flower/pull/4569), " +"[#4560](https://github.com/adap/flower/pull/4560), " +"[#4556](https://github.com/adap/flower/pull/4556), " +"[#4581](https://github.com/adap/flower/pull/4581), " +"[#4537](https://github.com/adap/flower/pull/4537), " +"[#4562](https://github.com/adap/flower/pull/4562), " +"[#4582](https://github.com/adap/flower/pull/4582))" msgstr "" #: ../../source/ref-changelog.md:27 msgid "" -"**Update example READMEs** " -"([#4117](https://github.com/adap/flower/pull/4117))" +"Enhanced documentation across various aspects, including updates to " +"translation workflows, Docker-related READMEs, and recommended datasets. " +"Improvements also include formatting fixes for dataset partitioning docs " +"and better references to resources in the datasets documentation index." 
msgstr "" #: ../../source/ref-changelog.md:29 msgid "" -"**Update SuperNode authentication docs** " -"([#4160](https://github.com/adap/flower/pull/4160))" +"**Update Infrastructure and CI/CD** " +"([#4577](https://github.com/adap/flower/pull/4577), " +"[#4578](https://github.com/adap/flower/pull/4578), " +"[#4558](https://github.com/adap/flower/pull/4558), " +"[#4551](https://github.com/adap/flower/pull/4551), " +"[#3356](https://github.com/adap/flower/pull/3356), " +"[#4559](https://github.com/adap/flower/pull/4559), " +"[#4575](https://github.com/adap/flower/pull/4575))" msgstr "" -#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 -#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 -#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 -#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 -#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 -#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 -#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 -#: ../../source/ref-changelog.md:1253 -msgid "Incompatible changes" +#: ../../source/ref-changelog.md:31 +msgid "" +"**General improvements** " +"([#4557](https://github.com/adap/flower/pull/4557), " +"[#4564](https://github.com/adap/flower/pull/4564), " +"[#4573](https://github.com/adap/flower/pull/4573), " +"[#4561](https://github.com/adap/flower/pull/4561), " +"[#4579](https://github.com/adap/flower/pull/4579), " +"[#4572](https://github.com/adap/flower/pull/4572))" +msgstr "" + +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:198 ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:408 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." 
msgstr "" #: ../../source/ref-changelog.md:35 -msgid "v1.11.0 (2024-08-30)" +msgid "v1.13.0 (2024-11-20)" msgstr "" #: ../../source/ref-changelog.md:41 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," -" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " -"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " -msgstr "" - -#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 -#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 -#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 -#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 -#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 -#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 -#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 -#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 -#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 -#: ../../source/ref-changelog.md:1348 -msgid "What's new?" 
+"Beutel`, `Daniel Nata Nugraha`, `Dimitris Stripelis`, `Heng Pan`, " +"`Javier`, `Mohammad Naseri`, `Robert Steiner`, `Waris Gill`, `William " +"Lindskog`, `Yan Gao`, `Yao Xu`, `wwjang` " msgstr "" #: ../../source/ref-changelog.md:45 msgid "" -"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " -"([#4006](https://github.com/adap/flower/pull/4006), " -"[#3945](https://github.com/adap/flower/pull/3945), " -"[#3999](https://github.com/adap/flower/pull/3999), " -"[#4027](https://github.com/adap/flower/pull/4027), " -"[#3851](https://github.com/adap/flower/pull/3851), " -"[#3946](https://github.com/adap/flower/pull/3946), " -"[#4003](https://github.com/adap/flower/pull/4003), " -"[#4029](https://github.com/adap/flower/pull/4029), " -"[#3942](https://github.com/adap/flower/pull/3942), " -"[#3957](https://github.com/adap/flower/pull/3957), " -"[#4020](https://github.com/adap/flower/pull/4020), " -"[#4044](https://github.com/adap/flower/pull/4044), " -"[#3852](https://github.com/adap/flower/pull/3852), " -"[#4019](https://github.com/adap/flower/pull/4019), " -"[#4031](https://github.com/adap/flower/pull/4031), " -"[#4036](https://github.com/adap/flower/pull/4036), " -"[#4049](https://github.com/adap/flower/pull/4049), " -"[#4017](https://github.com/adap/flower/pull/4017), " -"[#3943](https://github.com/adap/flower/pull/3943), " -"[#3944](https://github.com/adap/flower/pull/3944), " -"[#4011](https://github.com/adap/flower/pull/4011), " -"[#3619](https://github.com/adap/flower/pull/3619))" +"**Introduce `flwr ls` command** " +"([#4460](https://github.com/adap/flower/pull/4460), " +"[#4459](https://github.com/adap/flower/pull/4459), " +"[#4477](https://github.com/adap/flower/pull/4477))" msgstr "" #: ../../source/ref-changelog.md:47 msgid "" -"Dynamic code updates are here! `flwr run` can now ship and install the " -"latest version of your `ServerApp` and `ClientApp` to an already-running " -"federation (SuperLink and SuperNodes)." 
+"The `flwr ls` command is now available to display details about all runs " +"(or one specific run). It supports the following usage options:" msgstr "" #: ../../source/ref-changelog.md:49 -msgid "" -"How does it work? `flwr run` bundles your Flower app into a single FAB " -"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," -" to both the SuperLink and those SuperNodes that need it. This allows you" -" to keep SuperExec, SuperLink and SuperNodes running as permanent " -"infrastructure, and then ship code updates (including completely new " -"projects!) dynamically." +msgid "`flwr ls --runs [] []`: Lists all runs." msgstr "" -#: ../../source/ref-changelog.md:51 -msgid "`flwr run` is all you need." +#: ../../source/ref-changelog.md:50 +msgid "" +"`flwr ls --run-id [] []`: Displays details for " +"a specific run." msgstr "" -#: ../../source/ref-changelog.md:53 +#: ../../source/ref-changelog.md:52 msgid "" -"**Introduce isolated** `ClientApp` **execution** " -"([#3970](https://github.com/adap/flower/pull/3970), " -"[#3976](https://github.com/adap/flower/pull/3976), " -"[#4002](https://github.com/adap/flower/pull/4002), " -"[#4001](https://github.com/adap/flower/pull/4001), " -"[#4034](https://github.com/adap/flower/pull/4034), " -"[#4037](https://github.com/adap/flower/pull/4037), " -"[#3977](https://github.com/adap/flower/pull/3977), " -"[#4042](https://github.com/adap/flower/pull/4042), " -"[#3978](https://github.com/adap/flower/pull/3978), " -"[#4039](https://github.com/adap/flower/pull/4039), " -"[#4033](https://github.com/adap/flower/pull/4033), " -"[#3971](https://github.com/adap/flower/pull/3971), " -"[#4035](https://github.com/adap/flower/pull/4035), " -"[#3973](https://github.com/adap/flower/pull/3973), " -"[#4032](https://github.com/adap/flower/pull/4032))" +"This command provides information including the run ID, FAB ID and " +"version, run status, elapsed time, and timestamps for when the run was " +"created, started running, and 
finished." msgstr "" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-changelog.md:54 msgid "" -"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" -" enterprise deployment, this allows you to set strict limits on what the " -"`ClientApp` can and cannot do." +"**Fuse SuperLink and SuperExec** " +"([#4358](https://github.com/adap/flower/pull/4358), " +"[#4403](https://github.com/adap/flower/pull/4403), " +"[#4406](https://github.com/adap/flower/pull/4406), " +"[#4357](https://github.com/adap/flower/pull/4357), " +"[#4359](https://github.com/adap/flower/pull/4359), " +"[#4354](https://github.com/adap/flower/pull/4354), " +"[#4229](https://github.com/adap/flower/pull/4229), " +"[#4283](https://github.com/adap/flower/pull/4283), " +"[#4352](https://github.com/adap/flower/pull/4352))" msgstr "" -#: ../../source/ref-changelog.md:57 -msgid "`flower-supernode` supports three `--isolation` modes:" +#: ../../source/ref-changelog.md:56 +msgid "" +"SuperExec has been integrated into SuperLink, enabling SuperLink to " +"directly manage ServerApp processes (`flwr-serverapp`). The `flwr` CLI " +"now targets SuperLink's Exec API. Additionally, SuperLink introduces two " +"isolation modes for running ServerApps: `subprocess` (default) and " +"`process`, which can be specified using the `--isolation " +"{subprocess,process}` flag." msgstr "" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:58 msgid "" -"Unset: The SuperNode runs the `ClientApp` in the same process (as in " -"previous versions of Flower). This is the default mode." 
+"**Introduce `flwr-serverapp` command** " +"([#4394](https://github.com/adap/flower/pull/4394), " +"[#4370](https://github.com/adap/flower/pull/4370), " +"[#4367](https://github.com/adap/flower/pull/4367), " +"[#4350](https://github.com/adap/flower/pull/4350), " +"[#4364](https://github.com/adap/flower/pull/4364), " +"[#4400](https://github.com/adap/flower/pull/4400), " +"[#4363](https://github.com/adap/flower/pull/4363), " +"[#4401](https://github.com/adap/flower/pull/4401), " +"[#4388](https://github.com/adap/flower/pull/4388), " +"[#4402](https://github.com/adap/flower/pull/4402))" msgstr "" #: ../../source/ref-changelog.md:60 msgid "" -"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " -"`ClientApp`." +"The `flwr-serverapp` command has been introduced as a CLI entry point " +"that runs a `ServerApp` process. This process communicates with SuperLink" +" to load and execute the `ServerApp` object, enabling isolated execution " +"and more flexible deployment." msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:62 msgid "" -"`--isolation=process`: The SuperNode expects an externally-managed " -"process to run the `ClientApp`. This external process is not managed by " -"the SuperNode, so it has to be started beforehand and terminated " -"manually. The common way to use this isolation mode is via the new " -"`flwr/clientapp` Docker image." 
+"**Improve simulation engine and introduce `flwr-simulation` command** " +"([#4433](https://github.com/adap/flower/pull/4433), " +"[#4486](https://github.com/adap/flower/pull/4486), " +"[#4448](https://github.com/adap/flower/pull/4448), " +"[#4427](https://github.com/adap/flower/pull/4427), " +"[#4438](https://github.com/adap/flower/pull/4438), " +"[#4421](https://github.com/adap/flower/pull/4421), " +"[#4430](https://github.com/adap/flower/pull/4430), " +"[#4462](https://github.com/adap/flower/pull/4462))" msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:64 msgid "" -"**Improve Docker support for enterprise deployments** " -"([#4050](https://github.com/adap/flower/pull/4050), " -"[#4090](https://github.com/adap/flower/pull/4090), " -"[#3784](https://github.com/adap/flower/pull/3784), " -"[#3998](https://github.com/adap/flower/pull/3998), " -"[#4094](https://github.com/adap/flower/pull/4094), " -"[#3722](https://github.com/adap/flower/pull/3722))" +"The simulation engine has been significantly improved, resulting in " +"dramatically faster simulations. Additionally, the `flwr-simulation` " +"command has been introduced to enhance maintainability and provide a " +"dedicated entry point for running simulations." msgstr "" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:66 msgid "" -"Flower 1.11 ships many Docker improvements that are especially useful for" -" enterprise deployments:" -msgstr "" - -#: ../../source/ref-changelog.md:67 -msgid "`flwr/supernode` comes with a new Alpine Docker image." +"**Improve SuperLink message management** " +"([#4378](https://github.com/adap/flower/pull/4378), " +"[#4369](https://github.com/adap/flower/pull/4369))" msgstr "" #: ../../source/ref-changelog.md:68 msgid "" -"`flwr/clientapp` is a new image to be used with the `--isolation=process`" -" option. In this mode, SuperNode and `ClientApp` run in two different " -"Docker containers. 
`flwr/supernode` (preferably the Alpine version) runs " -"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " -"runs the `ClientApp`. This is the recommended way to deploy Flower in " -"enterprise settings." -msgstr "" - -#: ../../source/ref-changelog.md:69 -msgid "" -"New all-in-one Docker Compose enables you to easily start a full Flower " -"Deployment Engine on a single machine." +"SuperLink now validates the destination node ID of instruction messages " +"and checks the TTL (time-to-live) for reply messages. When pulling reply " +"messages, an error reply will be generated and returned if the " +"corresponding instruction message does not exist, has expired, or if the " +"reply message exists but has expired." msgstr "" #: ../../source/ref-changelog.md:70 msgid "" -"Completely new Docker documentation: " -"https://flower.ai/docs/framework/docker/index.html" +"**Introduce FedDebug baseline** " +"([#3783](https://github.com/adap/flower/pull/3783))" msgstr "" #: ../../source/ref-changelog.md:72 msgid "" -"**Improve SuperNode authentication** " -"([#4043](https://github.com/adap/flower/pull/4043), " -"[#4047](https://github.com/adap/flower/pull/4047), " -"[#4074](https://github.com/adap/flower/pull/4074))" +"FedDebug is a framework that enhances debugging in Federated Learning by " +"enabling interactive inspection of the training process and automatically" +" identifying clients responsible for degrading the global model's " +"performance—all without requiring testing data or labels. Learn more in " +"the [FedDebug baseline " +"documentation](https://flower.ai/docs/baselines/feddebug.html)." msgstr "" #: ../../source/ref-changelog.md:74 msgid "" -"SuperNode auth has been improved in several ways, including improved " -"logging, improved testing, and improved error handling." 
+"**Update documentation** " +"([#4511](https://github.com/adap/flower/pull/4511), " +"[#4010](https://github.com/adap/flower/pull/4010), " +"[#4396](https://github.com/adap/flower/pull/4396), " +"[#4499](https://github.com/adap/flower/pull/4499), " +"[#4269](https://github.com/adap/flower/pull/4269), " +"[#3340](https://github.com/adap/flower/pull/3340), " +"[#4482](https://github.com/adap/flower/pull/4482), " +"[#4387](https://github.com/adap/flower/pull/4387), " +"[#4342](https://github.com/adap/flower/pull/4342), " +"[#4492](https://github.com/adap/flower/pull/4492), " +"[#4474](https://github.com/adap/flower/pull/4474), " +"[#4500](https://github.com/adap/flower/pull/4500), " +"[#4514](https://github.com/adap/flower/pull/4514), " +"[#4236](https://github.com/adap/flower/pull/4236), " +"[#4112](https://github.com/adap/flower/pull/4112), " +"[#3367](https://github.com/adap/flower/pull/3367), " +"[#4501](https://github.com/adap/flower/pull/4501), " +"[#4373](https://github.com/adap/flower/pull/4373), " +"[#4409](https://github.com/adap/flower/pull/4409), " +"[#4356](https://github.com/adap/flower/pull/4356), " +"[#4520](https://github.com/adap/flower/pull/4520), " +"[#4524](https://github.com/adap/flower/pull/4524), " +"[#4525](https://github.com/adap/flower/pull/4525), " +"[#4526](https://github.com/adap/flower/pull/4526), " +"[#4527](https://github.com/adap/flower/pull/4527), " +"[#4528](https://github.com/adap/flower/pull/4528), " +"[#4545](https://github.com/adap/flower/pull/4545), " +"[#4522](https://github.com/adap/flower/pull/4522), " +"[#4534](https://github.com/adap/flower/pull/4534), " +"[#4513](https://github.com/adap/flower/pull/4513), " +"[#4529](https://github.com/adap/flower/pull/4529), " +"[#4441](https://github.com/adap/flower/pull/4441), " +"[#4530](https://github.com/adap/flower/pull/4530), " +"[#4470](https://github.com/adap/flower/pull/4470), " +"[#4553](https://github.com/adap/flower/pull/4553), " 
+"[#4531](https://github.com/adap/flower/pull/4531), " +"[#4554](https://github.com/adap/flower/pull/4554), " +"[#4555](https://github.com/adap/flower/pull/4555), " +"[#4552](https://github.com/adap/flower/pull/4552), " +"[#4533](https://github.com/adap/flower/pull/4533))" msgstr "" #: ../../source/ref-changelog.md:76 msgid "" -"**Update** `flwr new` **templates** " -"([#3933](https://github.com/adap/flower/pull/3933), " -"[#3894](https://github.com/adap/flower/pull/3894), " -"[#3930](https://github.com/adap/flower/pull/3930), " -"[#3931](https://github.com/adap/flower/pull/3931), " -"[#3997](https://github.com/adap/flower/pull/3997), " -"[#3979](https://github.com/adap/flower/pull/3979), " -"[#3965](https://github.com/adap/flower/pull/3965), " -"[#4013](https://github.com/adap/flower/pull/4013), " -"[#4064](https://github.com/adap/flower/pull/4064))" +"Many documentation pages and tutorials have been updated to improve " +"clarity, fix typos, incorporate user feedback, and stay aligned with the " +"latest features in the framework. Key updates include adding a guide for " +"designing stateful `ClientApp` objects, updating the comprehensive guide " +"for setting up and running Flower's `Simulation Engine`, updating the " +"XGBoost, scikit-learn, and JAX quickstart tutorials to use `flwr run`, " +"updating DP guide, removing outdated pages, updating Docker docs, and " +"marking legacy functions as deprecated. The [Secure Aggregation " +"Protocols](https://flower.ai/docs/framework/contributor-ref-secure-" +"aggregation-protocols.html) page has also been updated." msgstr "" #: ../../source/ref-changelog.md:78 msgid "" -"All `flwr new` templates have been updated to show the latest recommended" -" use of Flower APIs." 
+"**Update examples and templates** " +"([#4510](https://github.com/adap/flower/pull/4510), " +"[#4368](https://github.com/adap/flower/pull/4368), " +"[#4121](https://github.com/adap/flower/pull/4121), " +"[#4329](https://github.com/adap/flower/pull/4329), " +"[#4382](https://github.com/adap/flower/pull/4382), " +"[#4248](https://github.com/adap/flower/pull/4248), " +"[#4395](https://github.com/adap/flower/pull/4395), " +"[#4386](https://github.com/adap/flower/pull/4386), " +"[#4408](https://github.com/adap/flower/pull/4408))" msgstr "" #: ../../source/ref-changelog.md:80 msgid "" -"**Improve Simulation Engine** " -"([#4095](https://github.com/adap/flower/pull/4095), " -"[#3913](https://github.com/adap/flower/pull/3913), " -"[#4059](https://github.com/adap/flower/pull/4059), " -"[#3954](https://github.com/adap/flower/pull/3954), " -"[#4071](https://github.com/adap/flower/pull/4071), " -"[#3985](https://github.com/adap/flower/pull/3985), " -"[#3988](https://github.com/adap/flower/pull/3988))" +"Multiple examples and templates have been updated to enhance usability " +"and correctness. The updates include the `30-minute-tutorial`, " +"`quickstart-jax`, `quickstart-pytorch`, `advanced-tensorflow` examples, " +"and the FlowerTune template." msgstr "" #: ../../source/ref-changelog.md:82 msgid "" -"The Flower Simulation Engine comes with several updates, including " -"improved run config support, verbose logging, simulation backend " -"configuration via `flwr run`, and more." 
+"**Improve Docker support** " +"([#4506](https://github.com/adap/flower/pull/4506), " +"[#4424](https://github.com/adap/flower/pull/4424), " +"[#4224](https://github.com/adap/flower/pull/4224), " +"[#4413](https://github.com/adap/flower/pull/4413), " +"[#4414](https://github.com/adap/flower/pull/4414), " +"[#4336](https://github.com/adap/flower/pull/4336), " +"[#4420](https://github.com/adap/flower/pull/4420), " +"[#4407](https://github.com/adap/flower/pull/4407), " +"[#4422](https://github.com/adap/flower/pull/4422), " +"[#4532](https://github.com/adap/flower/pull/4532), " +"[#4540](https://github.com/adap/flower/pull/4540))" msgstr "" #: ../../source/ref-changelog.md:84 msgid "" -"**Improve** `RecordSet` " -"([#4052](https://github.com/adap/flower/pull/4052), " -"[#3218](https://github.com/adap/flower/pull/3218), " -"[#4016](https://github.com/adap/flower/pull/4016))" +"Docker images and configurations have been updated, including updating " +"Docker Compose files to version 1.13.0, refactoring the Docker build " +"matrix for better maintainability, updating `docker/build-push-action` to" +" 6.9.0, and improving Docker documentation." msgstr "" #: ../../source/ref-changelog.md:86 msgid "" -"`RecordSet` is the core object to exchange model parameters, " -"configuration values and metrics between `ClientApp` and `ServerApp`. " -"This release ships several smaller improvements to `RecordSet` and " -"related `*Record` types." 
+"**Allow app installation without internet access** " +"([#4479](https://github.com/adap/flower/pull/4479), " +"[#4475](https://github.com/adap/flower/pull/4475))" msgstr "" #: ../../source/ref-changelog.md:88 msgid "" -"**Update documentation** " -"([#3972](https://github.com/adap/flower/pull/3972), " -"[#3925](https://github.com/adap/flower/pull/3925), " -"[#4061](https://github.com/adap/flower/pull/4061), " -"[#3984](https://github.com/adap/flower/pull/3984), " -"[#3917](https://github.com/adap/flower/pull/3917), " -"[#3900](https://github.com/adap/flower/pull/3900), " -"[#4066](https://github.com/adap/flower/pull/4066), " -"[#3765](https://github.com/adap/flower/pull/3765), " -"[#4021](https://github.com/adap/flower/pull/4021), " -"[#3906](https://github.com/adap/flower/pull/3906), " -"[#4063](https://github.com/adap/flower/pull/4063), " -"[#4076](https://github.com/adap/flower/pull/4076), " -"[#3920](https://github.com/adap/flower/pull/3920), " -"[#3916](https://github.com/adap/flower/pull/3916))" +"The `flwr build` command now includes a wheel file in the FAB, enabling " +"Flower app installation in environments without internet access via `flwr" +" install`." msgstr "" #: ../../source/ref-changelog.md:90 msgid "" -"Many parts of the documentation, including the main tutorial, have been " -"migrated to show new Flower APIs and other new Flower features like the " -"improved Docker support." 
+"**Improve `flwr log` command** " +"([#4391](https://github.com/adap/flower/pull/4391), " +"[#4411](https://github.com/adap/flower/pull/4411), " +"[#4390](https://github.com/adap/flower/pull/4390), " +"[#4397](https://github.com/adap/flower/pull/4397))" msgstr "" #: ../../source/ref-changelog.md:92 msgid "" -"**Migrate code example to use new Flower APIs** " -"([#3758](https://github.com/adap/flower/pull/3758), " -"[#3701](https://github.com/adap/flower/pull/3701), " -"[#3919](https://github.com/adap/flower/pull/3919), " -"[#3918](https://github.com/adap/flower/pull/3918), " -"[#3934](https://github.com/adap/flower/pull/3934), " -"[#3893](https://github.com/adap/flower/pull/3893), " -"[#3833](https://github.com/adap/flower/pull/3833), " -"[#3922](https://github.com/adap/flower/pull/3922), " -"[#3846](https://github.com/adap/flower/pull/3846), " -"[#3777](https://github.com/adap/flower/pull/3777), " -"[#3874](https://github.com/adap/flower/pull/3874), " -"[#3873](https://github.com/adap/flower/pull/3873), " -"[#3935](https://github.com/adap/flower/pull/3935), " -"[#3754](https://github.com/adap/flower/pull/3754), " -"[#3980](https://github.com/adap/flower/pull/3980), " -"[#4089](https://github.com/adap/flower/pull/4089), " -"[#4046](https://github.com/adap/flower/pull/4046), " -"[#3314](https://github.com/adap/flower/pull/3314), " -"[#3316](https://github.com/adap/flower/pull/3316), " -"[#3295](https://github.com/adap/flower/pull/3295), " -"[#3313](https://github.com/adap/flower/pull/3313))" +"**Refactor SuperNode for better maintainability and efficiency** " +"([#4439](https://github.com/adap/flower/pull/4439), " +"[#4348](https://github.com/adap/flower/pull/4348), " +"[#4512](https://github.com/adap/flower/pull/4512), " +"[#4485](https://github.com/adap/flower/pull/4485))" msgstr "" #: ../../source/ref-changelog.md:94 -msgid "Many code examples have been migrated to use new Flower APIs." 
+msgid "" +"**Support NumPy `2.0`** " +"([#4440](https://github.com/adap/flower/pull/4440))" msgstr "" #: ../../source/ref-changelog.md:96 msgid "" -"**Update Flower framework, framework internals and quality " -"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " -"[#4053](https://github.com/adap/flower/pull/4053), " -"[#4098](https://github.com/adap/flower/pull/4098), " -"[#4067](https://github.com/adap/flower/pull/4067), " -"[#4105](https://github.com/adap/flower/pull/4105), " -"[#4048](https://github.com/adap/flower/pull/4048), " -"[#4107](https://github.com/adap/flower/pull/4107), " -"[#4069](https://github.com/adap/flower/pull/4069), " -"[#3915](https://github.com/adap/flower/pull/3915), " -"[#4101](https://github.com/adap/flower/pull/4101), " -"[#4108](https://github.com/adap/flower/pull/4108), " -"[#3914](https://github.com/adap/flower/pull/3914), " -"[#4068](https://github.com/adap/flower/pull/4068), " -"[#4041](https://github.com/adap/flower/pull/4041), " -"[#4040](https://github.com/adap/flower/pull/4040), " -"[#3986](https://github.com/adap/flower/pull/3986), " -"[#4026](https://github.com/adap/flower/pull/4026), " -"[#3961](https://github.com/adap/flower/pull/3961), " -"[#3975](https://github.com/adap/flower/pull/3975), " -"[#3983](https://github.com/adap/flower/pull/3983), " -"[#4091](https://github.com/adap/flower/pull/4091), " -"[#3982](https://github.com/adap/flower/pull/3982), " -"[#4079](https://github.com/adap/flower/pull/4079), " -"[#4073](https://github.com/adap/flower/pull/4073), " -"[#4060](https://github.com/adap/flower/pull/4060), " -"[#4106](https://github.com/adap/flower/pull/4106), " -"[#4080](https://github.com/adap/flower/pull/4080), " -"[#3974](https://github.com/adap/flower/pull/3974), " -"[#3996](https://github.com/adap/flower/pull/3996), " -"[#3991](https://github.com/adap/flower/pull/3991), " -"[#3981](https://github.com/adap/flower/pull/3981), " -"[#4093](https://github.com/adap/flower/pull/4093), " 
-"[#4100](https://github.com/adap/flower/pull/4100), " -"[#3939](https://github.com/adap/flower/pull/3939), " -"[#3955](https://github.com/adap/flower/pull/3955), " -"[#3940](https://github.com/adap/flower/pull/3940), " -"[#4038](https://github.com/adap/flower/pull/4038))" +"**Update infrastructure and CI/CD** " +"([#4466](https://github.com/adap/flower/pull/4466), " +"[#4419](https://github.com/adap/flower/pull/4419), " +"[#4338](https://github.com/adap/flower/pull/4338), " +"[#4334](https://github.com/adap/flower/pull/4334), " +"[#4456](https://github.com/adap/flower/pull/4456), " +"[#4446](https://github.com/adap/flower/pull/4446), " +"[#4415](https://github.com/adap/flower/pull/4415))" msgstr "" -#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:98 msgid "" -"As always, many parts of the Flower framework and quality infrastructure " -"were improved and updated." +"**Bugfixes** ([#4404](https://github.com/adap/flower/pull/4404), " +"[#4518](https://github.com/adap/flower/pull/4518), " +"[#4452](https://github.com/adap/flower/pull/4452), " +"[#4376](https://github.com/adap/flower/pull/4376), " +"[#4493](https://github.com/adap/flower/pull/4493), " +"[#4436](https://github.com/adap/flower/pull/4436), " +"[#4410](https://github.com/adap/flower/pull/4410), " +"[#4442](https://github.com/adap/flower/pull/4442), " +"[#4375](https://github.com/adap/flower/pull/4375), " +"[#4515](https://github.com/adap/flower/pull/4515))" msgstr "" -#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 -#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +#: ../../source/ref-changelog.md:100 +msgid "" +"**General improvements** " +"([#4454](https://github.com/adap/flower/pull/4454), " +"[#4365](https://github.com/adap/flower/pull/4365), " +"[#4423](https://github.com/adap/flower/pull/4423), " +"[#4516](https://github.com/adap/flower/pull/4516), " 
+"[#4509](https://github.com/adap/flower/pull/4509), " +"[#4498](https://github.com/adap/flower/pull/4498), " +"[#4371](https://github.com/adap/flower/pull/4371), " +"[#4449](https://github.com/adap/flower/pull/4449), " +"[#4488](https://github.com/adap/flower/pull/4488), " +"[#4478](https://github.com/adap/flower/pull/4478), " +"[#4392](https://github.com/adap/flower/pull/4392), " +"[#4483](https://github.com/adap/flower/pull/4483), " +"[#4517](https://github.com/adap/flower/pull/4517), " +"[#4330](https://github.com/adap/flower/pull/4330), " +"[#4458](https://github.com/adap/flower/pull/4458), " +"[#4347](https://github.com/adap/flower/pull/4347), " +"[#4429](https://github.com/adap/flower/pull/4429), " +"[#4463](https://github.com/adap/flower/pull/4463), " +"[#4496](https://github.com/adap/flower/pull/4496), " +"[#4508](https://github.com/adap/flower/pull/4508), " +"[#4444](https://github.com/adap/flower/pull/4444), " +"[#4417](https://github.com/adap/flower/pull/4417), " +"[#4504](https://github.com/adap/flower/pull/4504), " +"[#4418](https://github.com/adap/flower/pull/4418), " +"[#4480](https://github.com/adap/flower/pull/4480), " +"[#4455](https://github.com/adap/flower/pull/4455), " +"[#4468](https://github.com/adap/flower/pull/4468), " +"[#4385](https://github.com/adap/flower/pull/4385), " +"[#4487](https://github.com/adap/flower/pull/4487), " +"[#4393](https://github.com/adap/flower/pull/4393), " +"[#4489](https://github.com/adap/flower/pull/4489), " +"[#4389](https://github.com/adap/flower/pull/4389), " +"[#4507](https://github.com/adap/flower/pull/4507), " +"[#4469](https://github.com/adap/flower/pull/4469), " +"[#4340](https://github.com/adap/flower/pull/4340), " +"[#4353](https://github.com/adap/flower/pull/4353), " +"[#4494](https://github.com/adap/flower/pull/4494), " +"[#4461](https://github.com/adap/flower/pull/4461), " +"[#4362](https://github.com/adap/flower/pull/4362), " +"[#4473](https://github.com/adap/flower/pull/4473), " 
+"[#4405](https://github.com/adap/flower/pull/4405), " +"[#4416](https://github.com/adap/flower/pull/4416), " +"[#4453](https://github.com/adap/flower/pull/4453), " +"[#4491](https://github.com/adap/flower/pull/4491), " +"[#4539](https://github.com/adap/flower/pull/4539), " +"[#4542](https://github.com/adap/flower/pull/4542), " +"[#4538](https://github.com/adap/flower/pull/4538), " +"[#4543](https://github.com/adap/flower/pull/4543), " +"[#4541](https://github.com/adap/flower/pull/4541), " +"[#4550](https://github.com/adap/flower/pull/4550), " +"[#4481](https://github.com/adap/flower/pull/4481))" +msgstr "" + +#: ../../source/ref-changelog.md:104 ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:420 ../../source/ref-changelog.md:512 +#: ../../source/ref-changelog.md:1495 msgid "Deprecations" msgstr "" -#: ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:106 +#, fuzzy +msgid "**Deprecate Python 3.9**" +msgstr "**PR 만들기**" + +#: ../../source/ref-changelog.md:108 msgid "" -"**Deprecate accessing `Context` via `Client.context`** " -"([#3797](https://github.com/adap/flower/pull/3797))" +"Flower is deprecating support for Python 3.9 as several of its " +"dependencies are phasing out compatibility with this version. While no " +"immediate changes have been made, users are encouraged to plan for " +"upgrading to a supported Python version." msgstr "" -#: ../../source/ref-changelog.md:104 -msgid "" -"Now that both `client_fn` and `server_fn` receive a `Context` object, " -"accessing `Context` via `Client.context` is deprecated. `Client.context` " -"will be removed in a future release. 
If you need to access `Context` in " -"your `Client` implementation, pass it manually when creating the `Client`" -" instance in `client_fn`:" +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:200 +#: ../../source/ref-changelog.md:234 ../../source/ref-changelog.md:314 +#: ../../source/ref-changelog.md:430 ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:600 ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:787 ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:941 ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:1068 ../../source/ref-changelog.md:1130 +#: ../../source/ref-changelog.md:1149 ../../source/ref-changelog.md:1305 +#: ../../source/ref-changelog.md:1376 ../../source/ref-changelog.md:1413 +#: ../../source/ref-changelog.md:1456 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:112 msgid "" -"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" -" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " -"[#4077](https://github.com/adap/flower/pull/4077), " -"[#3850](https://github.com/adap/flower/pull/3850))" +"**Remove `flower-superexec` command** " +"([#4351](https://github.com/adap/flower/pull/4351))" msgstr "" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:114 msgid "" -"The CLI commands `flower-supernode` and `flower-server-app` now accept an" -" app directory as argument (instead of references to a `ClientApp` or " -"`ServerApp`). An app directory is any directory containing a " -"`pyproject.toml` file (with the appropriate Flower config fields set). " -"The easiest way to generate a compatible project structure is to use " -"`flwr new`." +"The `flower-superexec` command, previously used to launch SuperExec, is " +"no longer functional as SuperExec has been merged into SuperLink. 
" +"Starting an additional SuperExec is no longer necessary when SuperLink is" +" initiated." msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:116 msgid "" -"**Disable** `flower-client-app` **CLI command** " -"([#4022](https://github.com/adap/flower/pull/4022))" +"**Remove `flower-server-app` command** " +"([#4490](https://github.com/adap/flower/pull/4490))" msgstr "" -#: ../../source/ref-changelog.md:119 -msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." +#: ../../source/ref-changelog.md:118 +msgid "" +"The `flower-server-app` command has been removed. To start a Flower app, " +"please use the `flwr run` command instead." msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:120 msgid "" -"**Use spaces instead of commas for separating config args** " -"([#4000](https://github.com/adap/flower/pull/4000))" +"**Remove `app` argument from `flower-supernode` command** " +"([#4497](https://github.com/adap/flower/pull/4497))" msgstr "" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:122 msgid "" -"When passing configs (run config, node config) to Flower, you now need to" -" separate key-value pairs using spaces instead of commas. For example:" +"The usage of `flower-supernode ` has been removed. SuperNode " +"will now load the FAB delivered by SuperLink, and it is no longer " +"possible to directly specify an app directory." 
msgstr "" -#: ../../source/ref-changelog.md:129 -msgid "Previously, you could pass configs using commas, like this:" +#: ../../source/ref-changelog.md:124 +msgid "" +"**Remove support for non-app simulations** " +"([#4431](https://github.com/adap/flower/pull/4431))" msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:126 msgid "" -"**Remove** `flwr example` **CLI command** " -"([#4084](https://github.com/adap/flower/pull/4084))" +"The simulation engine (via `flower-simulation`) now exclusively supports " +"passing an app." msgstr "" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:128 msgid "" -"The experimental `flwr example` CLI command has been removed. Use `flwr " -"new` to generate a project and then run it using `flwr run`." +"**Rename CLI arguments for `flower-superlink` command** " +"([#4412](https://github.com/adap/flower/pull/4412))" msgstr "" -#: ../../source/ref-changelog.md:139 -msgid "v1.10.0 (2024-07-24)" +#: ../../source/ref-changelog.md:130 +msgid "" +"The `--driver-api-address` argument has been renamed to `--serverappio-" +"api-address` in the `flower-superlink` command to reflect the renaming of" +" the `Driver` service to the `ServerAppIo` service." msgstr "" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:132 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " -"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " -"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " +"**Rename CLI arguments for `flwr-serverapp` and `flwr-clientapp` " +"commands** ([#4495](https://github.com/adap/flower/pull/4495))" msgstr "" -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:134 msgid "" -"**Introduce** `flwr run` **(beta)** " -"([#3810](https://github.com/adap/flower/pull/3810), " -"[#3826](https://github.com/adap/flower/pull/3826), " -"[#3880](https://github.com/adap/flower/pull/3880), " -"[#3807](https://github.com/adap/flower/pull/3807), " -"[#3800](https://github.com/adap/flower/pull/3800), " -"[#3814](https://github.com/adap/flower/pull/3814), " -"[#3811](https://github.com/adap/flower/pull/3811), " -"[#3809](https://github.com/adap/flower/pull/3809), " -"[#3819](https://github.com/adap/flower/pull/3819))" +"The CLI arguments have been renamed for clarity and consistency. " +"Specifically, `--superlink` for `flwr-serverapp` is now `--serverappio-" +"api-address`, and `--supernode` for `flwr-clientapp` is now " +"`--clientappio-api-address`." msgstr "" -#: ../../source/ref-changelog.md:151 -msgid "" -"Flower 1.10 ships the first beta release of the new `flwr run` command. " -"`flwr run` can run different projects using `flwr run path/to/project`, " -"it enables you to easily switch between different federations using `flwr" -" run . federation` and it runs your Flower project using either local " -"simulation or the new (experimental) SuperExec service. This allows " -"Flower to scale federatated learning from fast local simulation to large-" -"scale production deployment, seamlessly. All projects generated with " -"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " -"`flwr new` to generate a project and then run it using `flwr run`." 
+#: ../../source/ref-changelog.md:136 +msgid "v1.12.0 (2024-10-14)" msgstr "" -#: ../../source/ref-changelog.md:153 +#: ../../source/ref-changelog.md:142 msgid "" -"**Introduce run config** " -"([#3751](https://github.com/adap/flower/pull/3751), " -"[#3750](https://github.com/adap/flower/pull/3750), " -"[#3845](https://github.com/adap/flower/pull/3845), " -"[#3824](https://github.com/adap/flower/pull/3824), " -"[#3746](https://github.com/adap/flower/pull/3746), " -"[#3728](https://github.com/adap/flower/pull/3728), " -"[#3730](https://github.com/adap/flower/pull/3730), " -"[#3725](https://github.com/adap/flower/pull/3725), " -"[#3729](https://github.com/adap/flower/pull/3729), " -"[#3580](https://github.com/adap/flower/pull/3580), " -"[#3578](https://github.com/adap/flower/pull/3578), " -"[#3576](https://github.com/adap/flower/pull/3576), " -"[#3798](https://github.com/adap/flower/pull/3798), " -"[#3732](https://github.com/adap/flower/pull/3732), " -"[#3815](https://github.com/adap/flower/pull/3815))" +"`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel" +" J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, " +"`Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan " +"Gao`, `xiliguguagua` " msgstr "" -#: ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:146 msgid "" -"The new run config feature allows you to run your Flower project in " -"different configurations without having to change a single line of code. " -"You can now build a configurable `ServerApp` and `ClientApp` that read " -"configuration values at runtime. This enables you to specify config " -"values like `learning-rate=0.01` in `pyproject.toml` (under the " -"`[tool.flwr.app.config]` key). These config values can then be easily " -"overridden via `flwr run --run-config learning-rate=0.02`, and read from " -"`Context` using `lr = context.run_config[\"learning-rate\"]`. 
Create a " -"new project using `flwr new` to see run config in action." +"**Introduce SuperExec log streaming** " +"([#3577](https://github.com/adap/flower/pull/3577), " +"[#3584](https://github.com/adap/flower/pull/3584), " +"[#4242](https://github.com/adap/flower/pull/4242), " +"[#3611](https://github.com/adap/flower/pull/3611), " +"[#3613](https://github.com/adap/flower/pull/3613))" msgstr "" -#: ../../source/ref-changelog.md:157 +#: ../../source/ref-changelog.md:148 msgid "" -"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " -"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " -"[#3697](https://github.com/adap/flower/pull/3697), " -"[#3694](https://github.com/adap/flower/pull/3694), " -"[#3696](https://github.com/adap/flower/pull/3696))" +"Flower now supports log streaming from a remote SuperExec using the `flwr" +" log` command. This new feature allows you to monitor logs from SuperExec" +" in real time via `flwr log ` (or `flwr log " +"`)." msgstr "" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:150 msgid "" -"The `client_fn` signature has been generalized to `client_fn(context: " -"Context) -> Client`. It now receives a `Context` object instead of the " -"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " -"`node_config` and `run_config`, among other things. This enables you to " -"build a configurable `ClientApp` that leverages the new run config " -"system." 
+"**Improve `flwr new` templates** " +"([#4291](https://github.com/adap/flower/pull/4291), " +"[#4292](https://github.com/adap/flower/pull/4292), " +"[#4293](https://github.com/adap/flower/pull/4293), " +"[#4294](https://github.com/adap/flower/pull/4294), " +"[#4295](https://github.com/adap/flower/pull/4295))" msgstr "" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:152 msgid "" -"The previous signature `client_fn(cid: str)` is now deprecated and " -"support for it will be removed in a future release. Use " -"`client_fn(context: Context) -> Client` everywhere." +"The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and " +"PyTorch have been updated to improve usability and consistency across " +"frameworks." msgstr "" -#: ../../source/ref-changelog.md:163 +#: ../../source/ref-changelog.md:154 msgid "" -"**Introduce new** `server_fn(context)` " -"([#3773](https://github.com/adap/flower/pull/3773), " -"[#3796](https://github.com/adap/flower/pull/3796), " -"[#3771](https://github.com/adap/flower/pull/3771))" +"**Migrate ID handling to use unsigned 64-bit integers** " +"([#4170](https://github.com/adap/flower/pull/4170), " +"[#4237](https://github.com/adap/flower/pull/4237), " +"[#4243](https://github.com/adap/flower/pull/4243))" msgstr "" -#: ../../source/ref-changelog.md:165 +#: ../../source/ref-changelog.md:156 msgid "" -"In addition to the new `client_fn(context:Context)`, a new " -"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" -" `ServerApp` (instead of passing, for example, `Strategy`, directly). " -"This enables you to leverage the full `Context` on the server-side to " -"build a configurable `ServerApp`." +"Node IDs, run IDs, and related fields have been migrated from signed " +"64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To " +"support this change, the `uint64` type is fully supported in all " +"communications. 
You may now use `uint64` values in config and metric " +"dictionaries. For Python users, that means using `int` values larger than" +" the maximum value of `sint64` but less than the maximum value of " +"`uint64`." msgstr "" -#: ../../source/ref-changelog.md:167 +#: ../../source/ref-changelog.md:158 msgid "" -"**Relaunch all** `flwr new` **templates** " -"([#3877](https://github.com/adap/flower/pull/3877), " -"[#3821](https://github.com/adap/flower/pull/3821), " -"[#3587](https://github.com/adap/flower/pull/3587), " -"[#3795](https://github.com/adap/flower/pull/3795), " -"[#3875](https://github.com/adap/flower/pull/3875), " -"[#3859](https://github.com/adap/flower/pull/3859), " -"[#3760](https://github.com/adap/flower/pull/3760))" +"**Add Flower architecture explanation** " +"([#3270](https://github.com/adap/flower/pull/3270))" msgstr "" -#: ../../source/ref-changelog.md:169 +#: ../../source/ref-changelog.md:160 msgid "" -"All `flwr new` templates have been significantly updated to showcase new " -"Flower features and best practices. This includes using `flwr run` and " -"the new run config feature. You can now easily create a new project using" -" `flwr new` and, after following the instructions to install it, `flwr " -"run` it." +"A new [Flower architecture explainer](https://flower.ai/docs/framework" +"/explanation-flower-architecture.html) page introduces Flower components " +"step-by-step. Check out the `EXPLANATIONS` section of the Flower " +"documentation if you're interested." msgstr "" -#: ../../source/ref-changelog.md:171 +#: ../../source/ref-changelog.md:162 msgid "" -"**Introduce** `flower-supernode` **(preview)** " -"([#3353](https://github.com/adap/flower/pull/3353))" +"**Introduce FedRep baseline** " +"([#3790](https://github.com/adap/flower/pull/3790))" msgstr "" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:164 msgid "" -"The new `flower-supernode` CLI is here to replace `flower-client-app`. 
" -"`flower-supernode` brings full multi-app support to the Flower client-" -"side. It also allows to pass `--node-config` to the SuperNode, which is " -"accessible in your `ClientApp` via `Context` (using the new " -"`client_fn(context: Context)` signature)." +"FedRep is a federated learning algorithm that learns shared data " +"representations across clients while allowing each to maintain " +"personalized local models, balancing collaboration and individual " +"adaptation. Read all the details in the paper: \"Exploiting Shared " +"Representations for Personalized Federated Learning\" " +"([arxiv](https://arxiv.org/abs/2102.07078))" msgstr "" -#: ../../source/ref-changelog.md:175 +#: ../../source/ref-changelog.md:166 msgid "" -"**Introduce node config** " -"([#3782](https://github.com/adap/flower/pull/3782), " -"[#3780](https://github.com/adap/flower/pull/3780), " -"[#3695](https://github.com/adap/flower/pull/3695), " -"[#3886](https://github.com/adap/flower/pull/3886))" +"**Improve FlowerTune template and LLM evaluation pipelines** " +"([#4286](https://github.com/adap/flower/pull/4286), " +"[#3769](https://github.com/adap/flower/pull/3769), " +"[#4272](https://github.com/adap/flower/pull/4272), " +"[#4257](https://github.com/adap/flower/pull/4257), " +"[#4220](https://github.com/adap/flower/pull/4220), " +"[#4282](https://github.com/adap/flower/pull/4282), " +"[#4171](https://github.com/adap/flower/pull/4171), " +"[#4228](https://github.com/adap/flower/pull/4228), " +"[#4258](https://github.com/adap/flower/pull/4258), " +"[#4296](https://github.com/adap/flower/pull/4296), " +"[#4287](https://github.com/adap/flower/pull/4287), " +"[#4217](https://github.com/adap/flower/pull/4217), " +"[#4249](https://github.com/adap/flower/pull/4249), " +"[#4324](https://github.com/adap/flower/pull/4324), " +"[#4219](https://github.com/adap/flower/pull/4219), " +"[#4327](https://github.com/adap/flower/pull/4327))" msgstr "" -#: ../../source/ref-changelog.md:177 +#: 
../../source/ref-changelog.md:168 msgid "" -"A new node config feature allows you to pass a static configuration to " -"the SuperNode. This configuration is read-only and available to every " -"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " -"config via `Context` (`context.node_config`)." +"Refined evaluation pipelines, metrics, and documentation for the upcoming" +" FlowerTune LLM Leaderboard across multiple domains including Finance, " +"Medical, and general NLP. Stay tuned for the official launch—we welcome " +"all federated learning and LLM enthusiasts to participate in this " +"exciting challenge!" msgstr "" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:170 msgid "" -"**Introduce SuperExec (experimental)** " -"([#3605](https://github.com/adap/flower/pull/3605), " -"[#3723](https://github.com/adap/flower/pull/3723), " -"[#3731](https://github.com/adap/flower/pull/3731), " -"[#3589](https://github.com/adap/flower/pull/3589), " -"[#3604](https://github.com/adap/flower/pull/3604), " -"[#3622](https://github.com/adap/flower/pull/3622), " -"[#3838](https://github.com/adap/flower/pull/3838), " -"[#3720](https://github.com/adap/flower/pull/3720), " -"[#3606](https://github.com/adap/flower/pull/3606), " -"[#3602](https://github.com/adap/flower/pull/3602), " -"[#3603](https://github.com/adap/flower/pull/3603), " -"[#3555](https://github.com/adap/flower/pull/3555), " -"[#3808](https://github.com/adap/flower/pull/3808), " -"[#3724](https://github.com/adap/flower/pull/3724), " -"[#3658](https://github.com/adap/flower/pull/3658), " -"[#3629](https://github.com/adap/flower/pull/3629))" +"**Enhance Docker Support and Documentation** " +"([#4191](https://github.com/adap/flower/pull/4191), " +"[#4251](https://github.com/adap/flower/pull/4251), " +"[#4190](https://github.com/adap/flower/pull/4190), " +"[#3928](https://github.com/adap/flower/pull/3928), " +"[#4298](https://github.com/adap/flower/pull/4298), " 
+"[#4192](https://github.com/adap/flower/pull/4192), " +"[#4136](https://github.com/adap/flower/pull/4136), " +"[#4187](https://github.com/adap/flower/pull/4187), " +"[#4261](https://github.com/adap/flower/pull/4261), " +"[#4177](https://github.com/adap/flower/pull/4177), " +"[#4176](https://github.com/adap/flower/pull/4176), " +"[#4189](https://github.com/adap/flower/pull/4189), " +"[#4297](https://github.com/adap/flower/pull/4297), " +"[#4226](https://github.com/adap/flower/pull/4226))" msgstr "" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:172 msgid "" -"This is the first experimental release of Flower SuperExec, a new service" -" that executes your runs. It's not ready for production deployment just " -"yet, but don't hesitate to give it a try if you're interested." +"Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images," +" and comprehensively updated [Docker " +"documentation](https://flower.ai/docs/framework/docker/index.html) " +"including quickstart guides and distributed Docker Compose instructions." msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:174 msgid "" -"**Add new federated learning with tabular data example** " -"([#3568](https://github.com/adap/flower/pull/3568))" +"**Introduce Flower glossary** " +"([#4165](https://github.com/adap/flower/pull/4165), " +"[#4235](https://github.com/adap/flower/pull/4235))" msgstr "" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:176 msgid "" -"A new code example exemplifies a federated learning setup using the " -"Flower framework on the Adult Census Income tabular dataset." +"Added the [Federated Learning glossary](https://flower.ai/glossary/) to " +"the Flower repository, located under the `flower/glossary/` directory. " +"This resource aims to provide clear definitions and explanations of key " +"FL concepts. 
Community contributions are highly welcomed to help expand " +"and refine this knowledge base — this is probably the easiest way to " +"become a Flower contributor!" msgstr "" -#: ../../source/ref-changelog.md:187 +#: ../../source/ref-changelog.md:178 msgid "" -"**Create generic adapter layer (preview)** " -"([#3538](https://github.com/adap/flower/pull/3538), " -"[#3536](https://github.com/adap/flower/pull/3536), " -"[#3540](https://github.com/adap/flower/pull/3540))" +"**Implement Message Time-to-Live (TTL)** " +"([#3620](https://github.com/adap/flower/pull/3620), " +"[#3596](https://github.com/adap/flower/pull/3596), " +"[#3615](https://github.com/adap/flower/pull/3615), " +"[#3609](https://github.com/adap/flower/pull/3609), " +"[#3635](https://github.com/adap/flower/pull/3635))" msgstr "" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:180 msgid "" -"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" -" with Flower in a transparent way. This makes Flower more modular and " -"allows for integration into other federated learning solutions and " -"platforms." +"Added comprehensive TTL support for messages in Flower's SuperLink. " +"Messages are now automatically expired and cleaned up based on " +"configurable TTL values, available through the low-level API (and used by" +" default in the high-level API)." 
msgstr "" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:182 msgid "" -"**Refactor Flower Simulation Engine** " -"([#3581](https://github.com/adap/flower/pull/3581), " -"[#3471](https://github.com/adap/flower/pull/3471), " -"[#3804](https://github.com/adap/flower/pull/3804), " -"[#3468](https://github.com/adap/flower/pull/3468), " -"[#3839](https://github.com/adap/flower/pull/3839), " -"[#3806](https://github.com/adap/flower/pull/3806), " -"[#3861](https://github.com/adap/flower/pull/3861), " -"[#3543](https://github.com/adap/flower/pull/3543), " -"[#3472](https://github.com/adap/flower/pull/3472), " -"[#3829](https://github.com/adap/flower/pull/3829), " -"[#3469](https://github.com/adap/flower/pull/3469))" +"**Improve FAB handling** " +"([#4303](https://github.com/adap/flower/pull/4303), " +"[#4264](https://github.com/adap/flower/pull/4264), " +"[#4305](https://github.com/adap/flower/pull/4305), " +"[#4304](https://github.com/adap/flower/pull/4304))" msgstr "" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-changelog.md:184 msgid "" -"The Simulation Engine was significantly refactored. This results in " -"faster and more stable simulations. It is also the foundation for " -"upcoming changes that aim to provide the next level of performance and " -"configurability in federated learning simulations." +"An 8-character hash is now appended to the FAB file name. The `flwr " +"install` command installs FABs with a more flattened folder structure, " +"reducing it from 3 levels to 1." 
msgstr "" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-changelog.md:186 msgid "" -"**Optimize Docker containers** " -"([#3591](https://github.com/adap/flower/pull/3591))" +"**Update documentation** " +"([#3341](https://github.com/adap/flower/pull/3341), " +"[#3338](https://github.com/adap/flower/pull/3338), " +"[#3927](https://github.com/adap/flower/pull/3927), " +"[#4152](https://github.com/adap/flower/pull/4152), " +"[#4151](https://github.com/adap/flower/pull/4151), " +"[#3993](https://github.com/adap/flower/pull/3993))" msgstr "" -#: ../../source/ref-changelog.md:197 +#: ../../source/ref-changelog.md:188 msgid "" -"Flower Docker containers were optimized and updated to use that latest " -"Flower framework features." +"Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging " +"Face, Fastai) to use the new `flwr run` command and removed default title" +" from documentation base template. A new blockchain example has been " +"added to FAQ." msgstr "" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:190 msgid "" -"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " -"[#3789](https://github.com/adap/flower/pull/3789))" +"**Update example projects** " +"([#3716](https://github.com/adap/flower/pull/3716), " +"[#4007](https://github.com/adap/flower/pull/4007), " +"[#4130](https://github.com/adap/flower/pull/4130), " +"[#4234](https://github.com/adap/flower/pull/4234), " +"[#4206](https://github.com/adap/flower/pull/4206), " +"[#4188](https://github.com/adap/flower/pull/4188), " +"[#4247](https://github.com/adap/flower/pull/4247), " +"[#4331](https://github.com/adap/flower/pull/4331))" msgstr "" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:192 msgid "" -"Improved logging aims to be more concise and helpful to show you the " -"details you actually care about." 
+"Refreshed multiple example projects including vertical FL, PyTorch " +"(advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized " +"Hugging Face quickstart with a smaller language model and removed legacy " +"simulation examples." msgstr "" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:194 msgid "" -"**Refactor framework internals** " -"([#3621](https://github.com/adap/flower/pull/3621), " -"[#3792](https://github.com/adap/flower/pull/3792), " -"[#3772](https://github.com/adap/flower/pull/3772), " -"[#3805](https://github.com/adap/flower/pull/3805), " -"[#3583](https://github.com/adap/flower/pull/3583), " -"[#3825](https://github.com/adap/flower/pull/3825), " -"[#3597](https://github.com/adap/flower/pull/3597), " -"[#3802](https://github.com/adap/flower/pull/3802), " -"[#3569](https://github.com/adap/flower/pull/3569))" +"**Update translations** " +"([#4070](https://github.com/adap/flower/pull/4070), " +"[#4316](https://github.com/adap/flower/pull/4316), " +"[#4252](https://github.com/adap/flower/pull/4252), " +"[#4256](https://github.com/adap/flower/pull/4256), " +"[#4210](https://github.com/adap/flower/pull/4210), " +"[#4263](https://github.com/adap/flower/pull/4263), " +"[#4259](https://github.com/adap/flower/pull/4259))" msgstr "" -#: ../../source/ref-changelog.md:207 -#, fuzzy -msgid "Documentation improvements" -msgstr "선택적 개선 사항" - -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:196 msgid "" -"**Add 🇰🇷 Korean translations** " -"([#3680](https://github.com/adap/flower/pull/3680))" +"**General improvements** " +"([#4239](https://github.com/adap/flower/pull/4239), " +"[4276](https://github.com/adap/flower/pull/4276), " +"[4204](https://github.com/adap/flower/pull/4204), " +"[4184](https://github.com/adap/flower/pull/4184), " +"[4227](https://github.com/adap/flower/pull/4227), " +"[4183](https://github.com/adap/flower/pull/4183), " +"[4202](https://github.com/adap/flower/pull/4202), " 
+"[4250](https://github.com/adap/flower/pull/4250), " +"[4267](https://github.com/adap/flower/pull/4267), " +"[4246](https://github.com/adap/flower/pull/4246), " +"[4240](https://github.com/adap/flower/pull/4240), " +"[4265](https://github.com/adap/flower/pull/4265), " +"[4238](https://github.com/adap/flower/pull/4238), " +"[4275](https://github.com/adap/flower/pull/4275), " +"[4318](https://github.com/adap/flower/pull/4318), " +"[#4178](https://github.com/adap/flower/pull/4178), " +"[#4315](https://github.com/adap/flower/pull/4315), " +"[#4241](https://github.com/adap/flower/pull/4241), " +"[#4289](https://github.com/adap/flower/pull/4289), " +"[#4290](https://github.com/adap/flower/pull/4290), " +"[#4181](https://github.com/adap/flower/pull/4181), " +"[#4208](https://github.com/adap/flower/pull/4208), " +"[#4225](https://github.com/adap/flower/pull/4225), " +"[#4314](https://github.com/adap/flower/pull/4314), " +"[#4174](https://github.com/adap/flower/pull/4174), " +"[#4203](https://github.com/adap/flower/pull/4203), " +"[#4274](https://github.com/adap/flower/pull/4274), " +"[#3154](https://github.com/adap/flower/pull/3154), " +"[#4201](https://github.com/adap/flower/pull/4201), " +"[#4268](https://github.com/adap/flower/pull/4268), " +"[#4254](https://github.com/adap/flower/pull/4254), " +"[#3990](https://github.com/adap/flower/pull/3990), " +"[#4212](https://github.com/adap/flower/pull/4212), " +"[#2938](https://github.com/adap/flower/pull/2938), " +"[#4205](https://github.com/adap/flower/pull/4205), " +"[#4222](https://github.com/adap/flower/pull/4222), " +"[#4313](https://github.com/adap/flower/pull/4313), " +"[#3936](https://github.com/adap/flower/pull/3936), " +"[#4278](https://github.com/adap/flower/pull/4278), " +"[#4319](https://github.com/adap/flower/pull/4319), " +"[#4332](https://github.com/adap/flower/pull/4332), " +"[#4333](https://github.com/adap/flower/pull/4333))" +msgstr "" + +#: ../../source/ref-changelog.md:202 +msgid "" +"**Drop Python 3.8 
support and update minimum version to 3.9** " +"([#4180](https://github.com/adap/flower/pull/4180), " +"[#4213](https://github.com/adap/flower/pull/4213), " +"[#4193](https://github.com/adap/flower/pull/4193), " +"[#4199](https://github.com/adap/flower/pull/4199), " +"[#4196](https://github.com/adap/flower/pull/4196), " +"[#4195](https://github.com/adap/flower/pull/4195), " +"[#4198](https://github.com/adap/flower/pull/4198), " +"[#4194](https://github.com/adap/flower/pull/4194))" +msgstr "" + +#: ../../source/ref-changelog.md:204 +msgid "" +"Python 3.8 support was deprecated in Flower 1.9, and this release removes" +" support. Flower now requires Python 3.9 or later (Python 3.11 is " +"recommended). CI and documentation were updated to use Python 3.9 as the " +"minimum supported version. Flower now supports Python 3.9 to 3.12." +msgstr "" + +#: ../../source/ref-changelog.md:206 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:212 msgid "" -"**Update translations** " -"([#3586](https://github.com/adap/flower/pull/3586), " -"[#3679](https://github.com/adap/flower/pull/3679), " -"[#3570](https://github.com/adap/flower/pull/3570), " -"[#3681](https://github.com/adap/flower/pull/3681), " -"[#3617](https://github.com/adap/flower/pull/3617), " -"[#3674](https://github.com/adap/flower/pull/3674), " -"[#3671](https://github.com/adap/flower/pull/3671), " -"[#3572](https://github.com/adap/flower/pull/3572), " -"[#3631](https://github.com/adap/flower/pull/3631))" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:213 -msgid "" -"**Update documentation** " -"([#3864](https://github.com/adap/flower/pull/3864), " -"[#3688](https://github.com/adap/flower/pull/3688), " -"[#3562](https://github.com/adap/flower/pull/3562), " -"[#3641](https://github.com/adap/flower/pull/3641), " -"[#3384](https://github.com/adap/flower/pull/3384), " -"[#3634](https://github.com/adap/flower/pull/3634), " -"[#3823](https://github.com/adap/flower/pull/3823), " -"[#3793](https://github.com/adap/flower/pull/3793), " -"[#3707](https://github.com/adap/flower/pull/3707))" -msgstr "" +#: ../../source/ref-changelog.md:214 +#, fuzzy +msgid "Improvements" +msgstr "선택적 개선 사항" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:216 msgid "" -"Updated documentation includes new install instructions for different " -"shells, a new Flower Code Examples documentation landing page, new `flwr`" -" CLI docs and an updated federated XGBoost code example." +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: ../../source/ref-changelog.md:219 -msgid "**Deprecate** `client_fn(cid: str)`" +#: ../../source/ref-changelog.md:218 +msgid "" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:220 msgid "" -"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " -"This signature is now deprecated. Use the new signature " -"`client_fn(context: Context) -> Client` instead. The new argument " -"`context` allows accessing `node_id`, `node_config`, `run_config` and " -"other `Context` features. 
When running using the simulation engine (or " -"using `flower-supernode` with a custom `--node-config partition-id=...`)," -" `context.node_config[\"partition-id\"]` will return an `int` partition " -"ID that can be used with Flower Datasets to load a different partition of" -" the dataset on each simulated or deployed SuperNode." +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: ../../source/ref-changelog.md:223 +#: ../../source/ref-changelog.md:222 msgid "" -"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" -" `ServerApp` **directly**" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: ../../source/ref-changelog.md:225 +#: ../../source/ref-changelog.md:224 msgid "" -"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " -"is now deprecated. Instead of passing " -"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " -"pass them wrapped in a `server_fn(context: Context) -> " -"ServerAppComponents` function, like this: " -"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " -"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " -"to that, `server_fn` allows you to access `Context` (for example, to read" -" the `run_config`)." 
+"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: ../../source/ref-changelog.md:229 +#: ../../source/ref-changelog.md:226 msgid "" -"**Remove support for `client_ids` in `start_simulation`** " -"([#3699](https://github.com/adap/flower/pull/3699))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: ../../source/ref-changelog.md:231 +#: ../../source/ref-changelog.md:228 msgid "" -"The (rarely used) feature that allowed passing custom `client_ids` to the" -" `start_simulation` function was removed. This removal is part of a " -"bigger effort to refactor the simulation engine and unify how the Flower " -"internals work in simulation and deployment." +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-changelog.md:233 +#: ../../source/ref-changelog.md:230 msgid "" -"**Remove `flower-driver-api` and `flower-fleet-api`** " -"([#3418](https://github.com/adap/flower/pull/3418))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: ../../source/ref-changelog.md:235 +#: ../../source/ref-changelog.md:232 msgid "" -"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" -"api` were removed in an effort to streamline the SuperLink developer " -"experience. Use `flower-superlink` instead." +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: ../../source/ref-changelog.md:237 -msgid "v1.9.0 (2024-06-10)" +#: ../../source/ref-changelog.md:238 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-changelog.md:243 +#: ../../source/ref-changelog.md:244 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:247 +#: ../../source/ref-changelog.md:248 msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " 
+"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:249 +#: ../../source/ref-changelog.md:250 msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:251 +#: ../../source/ref-changelog.md:252 msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. 
This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:253 -msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +#: ../../source/ref-changelog.md:254 +msgid "`flwr run` is all you need." msgstr "" -#: ../../source/ref-changelog.md:255 +#: ../../source/ref-changelog.md:256 msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." 
+"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: ../../source/ref-changelog.md:257 +#: ../../source/ref-changelog.md:258 msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: ../../source/ref-changelog.md:259 -msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." 
+#: ../../source/ref-changelog.md:260 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: ../../source/ref-changelog.md:261 +#: ../../source/ref-changelog.md:262 msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" #: ../../source/ref-changelog.md:263 msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:265 +#: ../../source/ref-changelog.md:264 msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: ../../source/ref-changelog.md:267 +#: ../../source/ref-changelog.md:266 msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-changelog.md:269 +#: ../../source/ref-changelog.md:268 msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" +msgstr "" + +#: ../../source/ref-changelog.md:270 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" #: ../../source/ref-changelog.md:271 msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. 
`flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." +msgstr "" + +#: ../../source/ref-changelog.md:272 +msgid "" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" #: ../../source/ref-changelog.md:273 msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" #: ../../source/ref-changelog.md:275 msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" #: ../../source/ref-changelog.md:277 msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" #: ../../source/ref-changelog.md:279 msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." 
+"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" #: ../../source/ref-changelog.md:281 msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" #: ../../source/ref-changelog.md:283 msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." 
+"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" #: ../../source/ref-changelog.md:285 msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" #: ../../source/ref-changelog.md:287 msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" #: ../../source/ref-changelog.md:289 msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and 
metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" #: ../../source/ref-changelog.md:291 msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" #: ../../source/ref-changelog.md:293 msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " 
-"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." msgstr "" #: ../../source/ref-changelog.md:295 msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" msgstr "" #: ../../source/ref-changelog.md:297 -msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " 
-"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +msgid "Many code examples have been migrated to use new Flower APIs." msgstr "" #: ../../source/ref-changelog.md:299 msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " 
+"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" msgstr "" -#: ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:305 msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" -msgstr "" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" +msgstr "" -#: ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:307 msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing 
`Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:305 -msgid "As always, Flower code examples have received many updates." +#: ../../source/ref-changelog.md:316 +msgid "" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: ../../source/ref-changelog.md:307 +#: ../../source/ref-changelog.md:318 msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), 
" -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:311 -msgid "**Deprecate Python 3.8 support**" +#: ../../source/ref-changelog.md:320 +msgid "" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: ../../source/ref-changelog.md:313 -msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." +#: ../../source/ref-changelog.md:322 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
msgstr "" -#: ../../source/ref-changelog.md:315 +#: ../../source/ref-changelog.md:324 msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-changelog.md:317 +#: ../../source/ref-changelog.md:326 msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: ../../source/ref-changelog.md:319 -msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +#: ../../source/ref-changelog.md:332 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-changelog.md:321 +#: ../../source/ref-changelog.md:338 msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: ../../source/ref-changelog.md:325 +#: ../../source/ref-changelog.md:340 msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +"The experimental `flwr example` CLI command has been removed. 
Use `flwr " +"new` to generate a project and then run it using `flwr run`." +msgstr "" + +#: ../../source/ref-changelog.md:342 +msgid "v1.10.0 (2024-07-24)" msgstr "" -#: ../../source/ref-changelog.md:327 +#: ../../source/ref-changelog.md:348 msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:329 +#: ../../source/ref-changelog.md:352 msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: ../../source/ref-changelog.md:331 +#: ../../source/ref-changelog.md:354 msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." 
+"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:333 +#: ../../source/ref-changelog.md:356 msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: ../../source/ref-changelog.md:335 +#: ../../source/ref-changelog.md:358 msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options.
 Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-changelog.md:337 -msgid "v1.8.0 (2024-04-03)" +#: ../../source/ref-changelog.md:360 +msgid "" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: ../../source/ref-changelog.md:343 +#: ../../source/ref-changelog.md:362 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now deprecated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system."
msgstr "" -#: ../../source/ref-changelog.md:347 +#: ../../source/ref-changelog.md:364 msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: ../../source/ref-changelog.md:349 +#: ../../source/ref-changelog.md:366 msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. 
" -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-changelog.md:351 +#: ../../source/ref-changelog.md:368 msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:353 +#: ../../source/ref-changelog.md:370 msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. 
The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: ../../source/ref-changelog.md:355 +#: ../../source/ref-changelog.md:372 msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -#: ../../source/ref-changelog.md:357 +#: ../../source/ref-changelog.md:374 msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. 
Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: ../../source/ref-changelog.md:359 +#: ../../source/ref-changelog.md:376 msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: ../../source/ref-changelog.md:361 +#: ../../source/ref-changelog.md:378 msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
+"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: ../../source/ref-changelog.md:363 +#: ../../source/ref-changelog.md:380 msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -#: ../../source/ref-changelog.md:365 +#: ../../source/ref-changelog.md:382 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: ../../source/ref-changelog.md:367 +#: ../../source/ref-changelog.md:384 msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." msgstr "" -#: ../../source/ref-changelog.md:369 +#: ../../source/ref-changelog.md:386 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. 
You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: ../../source/ref-changelog.md:371 +#: ../../source/ref-changelog.md:388 msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: ../../source/ref-changelog.md:373 +#: ../../source/ref-changelog.md:390 msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: ../../source/ref-changelog.md:375 +#: ../../source/ref-changelog.md:392 msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -#: ../../source/ref-changelog.md:377 +#: ../../source/ref-changelog.md:394 msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." 
+"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: ../../source/ref-changelog.md:379 +#: ../../source/ref-changelog.md:396 msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: ../../source/ref-changelog.md:381 +#: ../../source/ref-changelog.md:398 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." 
+"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: ../../source/ref-changelog.md:383 +#: ../../source/ref-changelog.md:400 msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -#: ../../source/ref-changelog.md:385 +#: ../../source/ref-changelog.md:402 msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: ../../source/ref-changelog.md:387 +#: ../../source/ref-changelog.md:404 msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +"Improved logging aims to be more concise and helpful to 
show you the " +"details you actually care about." msgstr "" -#: ../../source/ref-changelog.md:389 +#: ../../source/ref-changelog.md:406 msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: ../../source/ref-changelog.md:391 +#: ../../source/ref-changelog.md:410 +#, fuzzy +msgid "Documentation improvements" +msgstr "선택적 개선 사항" + +#: ../../source/ref-changelog.md:412 msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: ../../source/ref-changelog.md:393 +#: ../../source/ref-changelog.md:414 msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and 
federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: ../../source/ref-changelog.md:395 +#: ../../source/ref-changelog.md:416 msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " 
-"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " 
-"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" -msgstr "" - -#: ../../source/ref-changelog.md:401 -msgid "v1.7.0 (2024-02-05)" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: ../../source/ref-changelog.md:407 +#: ../../source/ref-changelog.md:418 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -#: ../../source/ref-changelog.md:411 -msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +#: ../../source/ref-changelog.md:422 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: ../../source/ref-changelog.md:413 +#: ../../source/ref-changelog.md:424 msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. 
The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: ../../source/ref-changelog.md:415 +#: ../../source/ref-changelog.md:426 msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-changelog.md:417 +#: ../../source/ref-changelog.md:428 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -#: ../../source/ref-changelog.md:419 +#: ../../source/ref-changelog.md:432 msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: ../../source/ref-changelog.md:421 +#: ../../source/ref-changelog.md:434 msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-changelog.md:423 +#: ../../source/ref-changelog.md:436 msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: ../../source/ref-changelog.md:425 +#: ../../source/ref-changelog.md:438 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -#: ../../source/ref-changelog.md:427 -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +#: ../../source/ref-changelog.md:440 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: ../../source/ref-changelog.md:429 +#: ../../source/ref-changelog.md:446 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: ../../source/ref-changelog.md:431 +#: ../../source/ref-changelog.md:450 msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: ../../source/ref-changelog.md:433 +#: ../../source/ref-changelog.md:452 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. 
In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-changelog.md:454 msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: ../../source/ref-changelog.md:437 +#: ../../source/ref-changelog.md:456 msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:458 msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." 
msgstr "" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:460 msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: ../../source/ref-changelog.md:443 +#: ../../source/ref-changelog.md:462 msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" -msgstr "" - -#: ../../source/ref-changelog.md:445 -msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" -msgstr "" - -#: ../../source/ref-changelog.md:447 -msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." 
-msgstr "" - -#: ../../source/ref-changelog.md:449 -msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" -msgstr "" - -#: ../../source/ref-changelog.md:451 -msgid "Many Flower code examples received substantial updates." -msgstr "" - -#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 -msgid "**Update Flower Baselines**" -msgstr "" - -#: ../../source/ref-changelog.md:455 -msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" -msgstr "" - -#: ../../source/ref-changelog.md:456 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" -msgstr "" - -#: ../../source/ref-changelog.md:457 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" -msgstr "" - -#: ../../source/ref-changelog.md:458 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "" - -#: ../../source/ref-changelog.md:459 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "" - -#: ../../source/ref-changelog.md:460 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" -msgstr "" - -#: ../../source/ref-changelog.md:462 -msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " 
-"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" #: ../../source/ref-changelog.md:464 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " 
-"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" #: ../../source/ref-changelog.md:466 msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" #: ../../source/ref-changelog.md:468 msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " 
-"[#2789](https://github.com/adap/flower/pull/2789))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" #: ../../source/ref-changelog.md:470 msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " 
-"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" +msgstr "" + +#: ../../source/ref-changelog.md:472 +msgid "" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" #: ../../source/ref-changelog.md:474 msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" #: ../../source/ref-changelog.md:476 msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. 
In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" #: ../../source/ref-changelog.md:478 msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" #: ../../source/ref-changelog.md:480 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" #: ../../source/ref-changelog.md:482 msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
msgstr "" #: ../../source/ref-changelog.md:484 msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" #: ../../source/ref-changelog.md:486 msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" #: ../../source/ref-changelog.md:488 msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" #: ../../source/ref-changelog.md:490 msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "" - +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." +msgstr "" + #: ../../source/ref-changelog.md:492 msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." 
+"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" #: ../../source/ref-changelog.md:494 -msgid "v1.6.0 (2023-11-28)" +msgid "" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." 
+msgstr "" + +#: ../../source/ref-changelog.md:496 +msgid "" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" +msgstr "" + +#: ../../source/ref-changelog.md:498 +msgid "" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" #: ../../source/ref-changelog.md:500 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" +msgstr "" + +#: ../../source/ref-changelog.md:502 +msgid "" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" #: ../../source/ref-changelog.md:504 msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" #: ../../source/ref-changelog.md:506 msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " 
+"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" #: ../../source/ref-changelog.md:508 -msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +msgid "As always, Flower code examples have received many updates." msgstr "" #: ../../source/ref-changelog.md:510 msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "" - -#: ../../source/ref-changelog.md:512 -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" #: ../../source/ref-changelog.md:514 -msgid "" -"**Support custom** `ClientManager` **in** 
`start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +msgid "**Deprecate Python 3.8 support**" msgstr "" #: ../../source/ref-changelog.md:516 msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" #: ../../source/ref-changelog.md:518 msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" #: ../../source/ref-changelog.md:520 -msgid "Add gRPC request-response capability to the Android SDK." +msgid "" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" #: ../../source/ref-changelog.md:522 msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" #: ../../source/ref-changelog.md:524 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "" - -#: ../../source/ref-changelog.md:526 msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. 
Support for " +"`--server` will be removed in a future release." msgstr "" #: ../../source/ref-changelog.md:528 msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" #: ../../source/ref-changelog.md:530 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" #: ../../source/ref-changelog.md:532 msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" #: ../../source/ref-changelog.md:534 msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. 
via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" #: ../../source/ref-changelog.md:536 msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" #: ../../source/ref-changelog.md:538 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" #: ../../source/ref-changelog.md:540 -msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:546 msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-changelog.md:550 msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" -msgstr "" - -#: ../../source/ref-changelog.md:550 -msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " 
+"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" #: ../../source/ref-changelog.md:552 msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" #: ../../source/ref-changelog.md:554 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" #: ../../source/ref-changelog.md:556 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgid "" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. 
The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" #: ../../source/ref-changelog.md:558 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" #: ../../source/ref-changelog.md:560 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgid "" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." 
msgstr "" #: ../../source/ref-changelog.md:562 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgid "" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" #: ../../source/ref-changelog.md:564 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" #: ../../source/ref-changelog.md:566 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgid "" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" #: ../../source/ref-changelog.md:568 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgid "" +"Built-in Differential Privacy is here! 
Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" #: ../../source/ref-changelog.md:570 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" #: ../../source/ref-changelog.md:572 msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. 
You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" #: ../../source/ref-changelog.md:574 msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" #: ../../source/ref-changelog.md:576 msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine 
(`flwr run`)." msgstr "" #: ../../source/ref-changelog.md:578 msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" #: ../../source/ref-changelog.md:580 msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " 
-"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 -#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 -#: ../../source/ref-changelog.md:857 -msgid "Flower received many improvements under the hood, too many to list here." +#: ../../source/ref-changelog.md:582 +msgid "" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:584 msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of attempts the client should make " +"before it gives up trying to reconnect to the SuperLink, and " +"`--max-wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:586 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:588 msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselines have been updated as well." msgstr "" -#: ../../source/ref-changelog.md:592 +#: ../../source/ref-changelog.md:590 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead."
-msgstr "" - -#: ../../source/ref-changelog.md:594 -msgid "v1.5.0 (2023-08-31)" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:592 msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
msgstr "" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:594 msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:596 msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:598 msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " 
+"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " 
+"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: ../../source/ref-changelog.md:610 -msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " 
-"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +#: ../../source/ref-changelog.md:604 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:610 msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" #: ../../source/ref-changelog.md:614 msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" #: ../../source/ref-changelog.md:616 msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. 
Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" #: ../../source/ref-changelog.md:618 msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" #: ../../source/ref-changelog.md:620 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." 
msgstr "" #: ../../source/ref-changelog.md:622 msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" #: ../../source/ref-changelog.md:624 msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" #: ../../source/ref-changelog.md:626 -msgid "**Deprecate Python 3.7**" +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" #: ../../source/ref-changelog.md:628 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." 
+"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" #: ../../source/ref-changelog.md:630 msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" #: ../../source/ref-changelog.md:632 msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" #: ../../source/ref-changelog.md:634 msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" #: ../../source/ref-changelog.md:636 msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
msgstr "" #: ../../source/ref-changelog.md:638 msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" #: ../../source/ref-changelog.md:640 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" #: ../../source/ref-changelog.md:642 msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" #: ../../source/ref-changelog.md:644 msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." 
+"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" #: ../../source/ref-changelog.md:646 msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" #: ../../source/ref-changelog.md:648 msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" #: ../../source/ref-changelog.md:650 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" #: ../../source/ref-changelog.md:652 msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." 
+"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" #: ../../source/ref-changelog.md:654 -msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +msgid "Many Flower code examples received substantial updates." msgstr "" -#: ../../source/ref-changelog.md:656 -msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +#: ../../source/ref-changelog.md:656 ../../source/ref-changelog.md:749 +msgid "**Update Flower Baselines**" msgstr "" #: ../../source/ref-changelog.md:658 msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" +msgstr "" + +#: ../../source/ref-changelog.md:659 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" #: ../../source/ref-changelog.md:660 -msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. 
This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "" + +#: ../../source/ref-changelog.md:661 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" #: ../../source/ref-changelog.md:662 -msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: ../../source/ref-changelog.md:664 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" +#: ../../source/ref-changelog.md:663 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: ../../source/ref-changelog.md:666 +#: ../../source/ref-changelog.md:665 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/ref-changelog.md:668 +#: ../../source/ref-changelog.md:667 msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and 
advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/ref-changelog.md:670 +#: ../../source/ref-changelog.md:669 msgid "" -"**General 
improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/ref-changelog.md:678 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-changelog.md:671 +msgid "" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/ref-changelog.md:684 +#: ../../source/ref-changelog.md:673 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), 
" +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:677 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:679 msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." 
msgstr "" -#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:681 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:683 msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:685 msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:687 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" 
+"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:689 msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:691 msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:693 msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:695 msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). 
Existing MXNet " +"examples won't receive updates." msgstr "" -#: ../../source/ref-changelog.md:708 -msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +#: ../../source/ref-changelog.md:697 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:703 msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:707 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:709 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:711 msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." 
msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:713 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:715 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:717 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:719 msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." 
+"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/ref-changelog.md:726 +#: ../../source/ref-changelog.md:721 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-changelog.md:728 -msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +#: ../../source/ref-changelog.md:723 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:725 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/ref-changelog.md:732 -msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +#: ../../source/ref-changelog.md:727 +msgid "Add gRPC request-response capability to the C++ SDK." 
msgstr "" -#: ../../source/ref-changelog.md:734 +#: ../../source/ref-changelog.md:729 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " 
-"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/ref-changelog.md:742 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:731 +msgid "" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:733 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." 
msgstr "" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:735 msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:737 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:739 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:741 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:743 msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/ref-changelog.md:762 -msgid "Both IPv4 and IPv6 addresses are supported." 
+#: ../../source/ref-changelog.md:745 ../../source/ref-changelog.md:747 +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:751 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:753 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:755 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:757 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
+"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/ref-changelog.md:772 -msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +#: ../../source/ref-changelog.md:759 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/ref-changelog.md:774 -msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +#: ../../source/ref-changelog.md:761 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/ref-changelog.md:776 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +#: ../../source/ref-changelog.md:763 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/ref-changelog.md:778 -msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." 
+#: ../../source/ref-changelog.md:765 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/ref-changelog.md:780 -msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +#: ../../source/ref-changelog.md:767 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/ref-changelog.md:782 -msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +#: ../../source/ref-changelog.md:769 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: ../../source/ref-changelog.md:784 -msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +#: ../../source/ref-changelog.md:771 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:773 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:775 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " 
-"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/ref-changelog.md:792 +#: ../../source/ref-changelog.md:777 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " 
-"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 +#: ../../source/ref-changelog.md:779 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "" - -#: ../../source/ref-changelog.md:800 -msgid "v1.2.0 (2023-01-13)" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/ref-changelog.md:806 +#: ../../source/ref-changelog.md:781 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:783 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " 
+"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +#: ../../source/ref-changelog.md:785 ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:993 +#: ../../source/ref-changelog.md:1060 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:789 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:791 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. 
New " -"defaults make running GPU-based simulations substantially more robust." +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:793 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:795 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: ../../source/ref-changelog.md:822 -msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +#: ../../source/ref-changelog.md:797 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/ref-changelog.md:823 +#: ../../source/ref-changelog.md:803 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:807 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/ref-changelog.md:825 +#: ../../source/ref-changelog.md:809 msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/ref-changelog.md:827 +#: ../../source/ref-changelog.md:811 msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" -#: ../../source/ref-changelog.md:829 +#: ../../source/ref-changelog.md:813 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: 
../../source/ref-changelog.md:831 +#: ../../source/ref-changelog.md:815 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/ref-changelog.md:833 +#: ../../source/ref-changelog.md:817 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/ref-changelog.md:835 +#: ../../source/ref-changelog.md:819 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." 
msgstr "" -#: ../../source/ref-changelog.md:837 +#: ../../source/ref-changelog.md:821 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/ref-changelog.md:839 +#: ../../source/ref-changelog.md:823 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:825 msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " 
+"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/ref-changelog.md:843 +#: ../../source/ref-changelog.md:827 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/ref-changelog.md:845 -msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +#: ../../source/ref-changelog.md:829 +msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/ref-changelog.md:847 +#: ../../source/ref-changelog.md:831 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:849 +#: ../../source/ref-changelog.md:833 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/ref-changelog.md:851 +#: ../../source/ref-changelog.md:835 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." 
msgstr "" -#: ../../source/ref-changelog.md:853 +#: ../../source/ref-changelog.md:837 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/ref-changelog.md:855 +#: ../../source/ref-changelog.md:839 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." 
msgstr "" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:841 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:843 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" -msgstr "" - -#: ../../source/ref-changelog.md:869 -msgid "v1.1.0 (2022-10-31)" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:845 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:847 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/ref-changelog.md:879 +#: ../../source/ref-changelog.md:849 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:881 +#: ../../source/ref-changelog.md:851 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:853 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:855 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. 
The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:857 msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:859 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:861 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:863 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." 
+msgstr "" + +#: ../../source/ref-changelog.md:865 +msgid "" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "" + +#: ../../source/ref-changelog.md:867 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "" + +#: ../../source/ref-changelog.md:869 +msgid "" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" +msgstr "" + +#: ../../source/ref-changelog.md:871 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:873 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" +msgstr "" + +#: ../../source/ref-changelog.md:881 +msgid "v1.4.0 (2023-04-21)" +msgstr "" + +#: ../../source/ref-changelog.md:887 +msgid "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +msgstr "" + +#: ../../source/ref-changelog.md:891 +msgid "" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" +msgstr "" + +#: ../../source/ref-changelog.md:893 +msgid "" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." 
msgstr "" #: ../../source/ref-changelog.md:895 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" #: ../../source/ref-changelog.md:897 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" #: ../../source/ref-changelog.md:899 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" #: ../../source/ref-changelog.md:901 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" 
msgstr "" #: ../../source/ref-changelog.md:903 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" #: ../../source/ref-changelog.md:905 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" #: ../../source/ref-changelog.md:907 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" #: ../../source/ref-changelog.md:909 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." 
msgstr "" #: ../../source/ref-changelog.md:911 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" #: ../../source/ref-changelog.md:913 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." 
msgstr "" #: ../../source/ref-changelog.md:915 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" #: ../../source/ref-changelog.md:917 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" #: ../../source/ref-changelog.md:919 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" #: ../../source/ref-changelog.md:921 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." 
+"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" #: ../../source/ref-changelog.md:923 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" #: ../../source/ref-changelog.md:925 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." -msgstr "" - -#: ../../source/ref-changelog.md:931 -msgid "v1.0.0 (2022-07-28)" -msgstr "" - -#: ../../source/ref-changelog.md:933 -msgid "Highlights" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/ref-changelog.md:935 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:927 +msgid "" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" -#: ../../source/ref-changelog.md:936 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/ref-changelog.md:929 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/ref-changelog.md:937 -msgid "Configurable `get_parameters`" +#: ../../source/ref-changelog.md:931 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/ref-changelog.md:938 +#: ../../source/ref-changelog.md:933 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/ref-changelog.md:942 +#: ../../source/ref-changelog.md:935 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -#: ../../source/ref-changelog.md:944 +#: ../../source/ref-changelog.md:937 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " 
+"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/ref-changelog.md:948 -msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +#: ../../source/ref-changelog.md:945 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/ref-changelog.md:950 +#: ../../source/ref-changelog.md:951 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:952 +#: ../../source/ref-changelog.md:955 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/ref-changelog.md:954 +#: ../../source/ref-changelog.md:957 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." 
msgstr "" -#: ../../source/ref-changelog.md:956 +#: ../../source/ref-changelog.md:959 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:961 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/ref-changelog.md:960 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: ../../source/ref-changelog.md:963 +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/ref-changelog.md:961 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: ../../source/ref-changelog.md:965 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: ../../source/ref-changelog.md:962 -msgid "`eval_fn` --> `evaluate_fn`" +#: ../../source/ref-changelog.md:967 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:969 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." 
msgstr "" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:971 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/ref-changelog.md:968 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-changelog.md:973 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:975 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:977 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." 
msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:979 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/ref-changelog.md:976 +#: ../../source/ref-changelog.md:981 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:983 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:985 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." 
+"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:987 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/ref-changelog.md:984 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +#: ../../source/ref-changelog.md:989 +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:991 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " 
+"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:995 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/ref-changelog.md:990 +#: ../../source/ref-changelog.md:997 ../../source/ref-changelog.md:1064 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"As usual, the documentation has improved quite a bit. 
It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: ../../source/ref-changelog.md:992 -msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +#: ../../source/ref-changelog.md:1003 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:1009 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/ref-changelog.md:996 +#: ../../source/ref-changelog.md:1013 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/ref-changelog.md:998 +#: ../../source/ref-changelog.md:1015 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/ref-changelog.md:1000 +#: ../../source/ref-changelog.md:1017 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/ref-changelog.md:1002 +#: ../../source/ref-changelog.md:1019 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -#: ../../source/ref-changelog.md:1004 +#: ../../source/ref-changelog.md:1021 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/ref-changelog.md:1008 +#: ../../source/ref-changelog.md:1023 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" msgstr "" -#: ../../source/ref-changelog.md:1010 +#: ../../source/ref-changelog.md:1025 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1012 +#: ../../source/ref-changelog.md:1026 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1014 +#: ../../source/ref-changelog.md:1027 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1016 +#: ../../source/ref-changelog.md:1028 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1018 +#: ../../source/ref-changelog.md:1030 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." 
+"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/ref-changelog.md:1020 +#: ../../source/ref-changelog.md:1032 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: ../../source/ref-changelog.md:1022 +#: ../../source/ref-changelog.md:1034 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "" - -#: ../../source/ref-changelog.md:1024 -msgid "`scikit-learn`" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." 
msgstr "" -#: ../../source/ref-changelog.md:1025 -msgid "`simulation_pytorch`" +#: ../../source/ref-changelog.md:1036 +msgid "" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/ref-changelog.md:1026 -msgid "`quickstart_pytorch`" +#: ../../source/ref-changelog.md:1038 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/ref-changelog.md:1027 -msgid "`quickstart_simulation`" +#: ../../source/ref-changelog.md:1040 +msgid "" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -#: ../../source/ref-changelog.md:1028 -msgid "`quickstart_tensorflow`" +#: ../../source/ref-changelog.md:1042 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:1029 -msgid "`advanced_tensorflow`" +#: ../../source/ref-changelog.md:1044 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/ref-changelog.md:1031 +#: ../../source/ref-changelog.md:1046 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:1048 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." 
msgstr "" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:1050 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:1052 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 -#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 -msgid "**Minor updates**" +#: ../../source/ref-changelog.md:1054 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:1056 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -#: ../../source/ref-changelog.md:1042 +#: ../../source/ref-changelog.md:1058 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/ref-changelog.md:1043 +#: ../../source/ref-changelog.md:1062 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" 
+"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/ref-changelog.md:1044 +#: ../../source/ref-changelog.md:1066 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-changelog.md:1046 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:1072 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/ref-changelog.md:1050 +#: ../../source/ref-changelog.md:1076 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:1052 +#: ../../source/ref-changelog.md:1078 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. 
Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/ref-changelog.md:1054 +#: ../../source/ref-changelog.md:1082 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:1084 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
msgstr "" -#: ../../source/ref-changelog.md:1058 +#: ../../source/ref-changelog.md:1086 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:1088 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: ../../source/ref-changelog.md:1062 +#: ../../source/ref-changelog.md:1090 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/ref-changelog.md:1064 +#: ../../source/ref-changelog.md:1092 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
msgstr "" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:1094 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:1096 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1098 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:1100 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:1102 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/ref-changelog.md:1076 +#: ../../source/ref-changelog.md:1104 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: ../../source/ref-changelog.md:1078 +#: ../../source/ref-changelog.md:1106 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1108 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -#: ../../source/ref-changelog.md:1082 +#: ../../source/ref-changelog.md:1110 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/ref-changelog.md:1084 +#: ../../source/ref-changelog.md:1112 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -#: ../../source/ref-changelog.md:1086 +#: ../../source/ref-changelog.md:1114 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/ref-changelog.md:1088 +#: ../../source/ref-changelog.md:1116 msgid "" -"A new code example (`advanced_pytorch`) 
demonstrates advanced Flower " -"concepts with PyTorch." +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -#: ../../source/ref-changelog.md:1090 +#: ../../source/ref-changelog.md:1118 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/ref-changelog.md:1092 +#: ../../source/ref-changelog.md:1120 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/ref-changelog.md:1096 +#: ../../source/ref-changelog.md:1122 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/ref-changelog.md:1097 +#: ../../source/ref-changelog.md:1124 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: ../../source/ref-changelog.md:1098 +#: ../../source/ref-changelog.md:1126 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-changelog.md:1099 +#: ../../source/ref-changelog.md:1128 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." 
msgstr "" -#: ../../source/ref-changelog.md:1100 -msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +#: ../../source/ref-changelog.md:1134 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/ref-changelog.md:1104 -msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +#: ../../source/ref-changelog.md:1136 +msgid "Highlights" msgstr "" -#: ../../source/ref-changelog.md:1105 -msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +#: ../../source/ref-changelog.md:1138 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/ref-changelog.md:1106 -msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +#: ../../source/ref-changelog.md:1139 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-changelog.md:1107 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:1140 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/ref-changelog.md:1108 +#: ../../source/ref-changelog.md:1141 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/ref-changelog.md:1109 +#: ../../source/ref-changelog.md:1145 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " 
+"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/ref-changelog.md:1110 +#: ../../source/ref-changelog.md:1147 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " 
+"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: ../../source/ref-changelog.md:1111 +#: ../../source/ref-changelog.md:1151 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/ref-changelog.md:1113 -msgid "v0.18.0 (2022-02-28)" +#: ../../source/ref-changelog.md:1153 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -#: ../../source/ref-changelog.md:1117 +#: ../../source/ref-changelog.md:1155 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/ref-changelog.md:1119 +#: ../../source/ref-changelog.md:1157 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." 
+"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:1159 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:1123 +#: ../../source/ref-changelog.md:1161 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/ref-changelog.md:1125 -msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +#: ../../source/ref-changelog.md:1163 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:1127 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +#: ../../source/ref-changelog.md:1164 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/ref-changelog.md:1129 -msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +#: ../../source/ref-changelog.md:1165 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:1167 msgid "" -"Android support has finally arrived in `main`! 
Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:1169 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-changelog.md:1135 -msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +#: ../../source/ref-changelog.md:1171 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-changelog.md:1137 +#: ../../source/ref-changelog.md:1173 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." 
+"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:1175 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/ref-changelog.md:1141 +#: ../../source/ref-changelog.md:1177 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:1143 +#: ../../source/ref-changelog.md:1179 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/ref-changelog.md:1145 +#: ../../source/ref-changelog.md:1181 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." 
+"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:1183 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:1185 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/ref-changelog.md:1151 -msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +#: ../../source/ref-changelog.md:1187 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: ../../source/ref-changelog.md:1153 +#: ../../source/ref-changelog.md:1189 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." 
+"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/ref-changelog.md:1155 +#: ../../source/ref-changelog.md:1191 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/ref-changelog.md:1157 +#: ../../source/ref-changelog.md:1193 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:1159 +#: ../../source/ref-changelog.md:1195 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/ref-changelog.md:1161 +#: ../../source/ref-changelog.md:1197 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." 
+"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:1165 +#: ../../source/ref-changelog.md:1199 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -#: ../../source/ref-changelog.md:1166 +#: ../../source/ref-changelog.md:1201 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/ref-changelog.md:1167 +#: ../../source/ref-changelog.md:1203 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/ref-changelog.md:1168 +#: ../../source/ref-changelog.md:1205 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:1169 +#: ../../source/ref-changelog.md:1207 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. 
This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -#: ../../source/ref-changelog.md:1170 +#: ../../source/ref-changelog.md:1211 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/ref-changelog.md:1171 +#: ../../source/ref-changelog.md:1213 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/ref-changelog.md:1175 +#: ../../source/ref-changelog.md:1215 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/ref-changelog.md:1177 +#: ../../source/ref-changelog.md:1217 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." -msgstr "" - -#: ../../source/ref-changelog.md:1179 -msgid "v0.17.0 (2021-09-24)" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" 
msgstr "" -#: ../../source/ref-changelog.md:1183 +#: ../../source/ref-changelog.md:1219 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:1185 +#: ../../source/ref-changelog.md:1221 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/ref-changelog.md:1187 +#: ../../source/ref-changelog.md:1223 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." 
+"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-changelog.md:1189 +#: ../../source/ref-changelog.md:1225 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/ref-changelog.md:1191 -msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +#: ../../source/ref-changelog.md:1227 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:1192 -msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +#: ../../source/ref-changelog.md:1228 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:1194 -msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +#: ../../source/ref-changelog.md:1229 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:1196 -msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +#: ../../source/ref-changelog.md:1230 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:1198 -msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +#: ../../source/ref-changelog.md:1231 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:1200 -msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +#: ../../source/ref-changelog.md:1232 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:1204 +#: 
../../source/ref-changelog.md:1234 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:1205 +#: ../../source/ref-changelog.md:1236 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:1206 +#: ../../source/ref-changelog.md:1238 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:1207 +#: ../../source/ref-changelog.md:1240 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" 
msgstr "" -#: ../../source/ref-changelog.md:1208 -msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +#: ../../source/ref-changelog.md:1242 ../../source/ref-changelog.md:1297 +#: ../../source/ref-changelog.md:1366 ../../source/ref-changelog.md:1405 +msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:1212 +#: ../../source/ref-changelog.md:1244 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:1214 +#: ../../source/ref-changelog.md:1245 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:1216 +#: ../../source/ref-changelog.md:1246 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:1218 +#: ../../source/ref-changelog.md:1247 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). 
Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:1249 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/ref-changelog.md:1220 +#: ../../source/ref-changelog.md:1253 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:1222 +#: ../../source/ref-changelog.md:1255 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." 
msgstr "" -#: ../../source/ref-changelog.md:1224 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:1257 +msgid "" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:1228 +#: ../../source/ref-changelog.md:1259 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:1230 -msgid "(abstract) FedOpt" +#: ../../source/ref-changelog.md:1261 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:1233 +#: ../../source/ref-changelog.md:1263 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -#: ../../source/ref-changelog.md:1235 +#: ../../source/ref-changelog.md:1265 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." 
+"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:1237 +#: ../../source/ref-changelog.md:1267 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:1239 +#: ../../source/ref-changelog.md:1269 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:1241 +#: ../../source/ref-changelog.md:1271 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." 
msgstr "" -#: ../../source/ref-changelog.md:1243 +#: ../../source/ref-changelog.md:1273 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:1245 +#: ../../source/ref-changelog.md:1275 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
msgstr "" -#: ../../source/ref-changelog.md:1247 +#: ../../source/ref-changelog.md:1277 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:1249 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:1279 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1281 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:1255 +#: ../../source/ref-changelog.md:1283 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:1257 +#: ../../source/ref-changelog.md:1285 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:1259 +#: ../../source/ref-changelog.md:1287 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:1261 +#: ../../source/ref-changelog.md:1289 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:1263 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:1291 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." 
msgstr "" -#: ../../source/ref-changelog.md:1267 +#: ../../source/ref-changelog.md:1293 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:1269 +#: ../../source/ref-changelog.md:1295 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: ../../source/ref-changelog.md:1271 +#: ../../source/ref-changelog.md:1299 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-changelog.md:1290 +#: ../../source/ref-changelog.md:1300 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." 
+"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-changelog.md:1294 +#: ../../source/ref-changelog.md:1301 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-changelog.md:1296 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:1302 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-changelog.md:1300 +#: ../../source/ref-changelog.md:1303 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-changelog.md:1302 +#: ../../source/ref-changelog.md:1307 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" 
+"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-changelog.md:1304 +#: ../../source/ref-changelog.md:1308 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-changelog.md:1306 +#: ../../source/ref-changelog.md:1309 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-changelog.md:1308 +#: ../../source/ref-changelog.md:1310 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:1323 +#: ../../source/ref-changelog.md:1311 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-changelog.md:1325 +#: ../../source/ref-changelog.md:1312 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:1327 +#: ../../source/ref-changelog.md:1313 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:1329 +#: ../../source/ref-changelog.md:1314 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:1346 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/ref-changelog.md:1316 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-changelog.md:1350 +#: ../../source/ref-changelog.md:1320 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-changelog.md:1351 -msgid "Improved documentation" +#: ../../source/ref-changelog.md:1322 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." 
msgstr "" -#: ../../source/ref-changelog.md:1352 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:1324 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-changelog.md:1353 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:1326 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: ../../source/ref-changelog.md:1354 +#: ../../source/ref-changelog.md:1328 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-changelog.md:1355 +#: ../../source/ref-changelog.md:1330 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: ../../source/ref-changelog.md:1357 -msgid "Bugfix:" +#: ../../source/ref-changelog.md:1332 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/ref-changelog.md:1359 +#: ../../source/ref-changelog.md:1334 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. 
One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: ../../source/ref-changelog.md:1361 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:1336 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 -msgid "Important changes:" +#: ../../source/ref-changelog.md:1338 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-changelog.md:1365 +#: ../../source/ref-changelog.md:1340 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." 
msgstr "" -#: ../../source/ref-changelog.md:1366 +#: ../../source/ref-changelog.md:1342 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-changelog.md:1367 +#: ../../source/ref-changelog.md:1344 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/ref-changelog.md:1369 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:1346 +msgid "" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-changelog.md:1371 -msgid "Incompatible changes:" +#: ../../source/ref-changelog.md:1348 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-changelog.md:1373 +#: ../../source/ref-changelog.md:1350 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-changelog.md:1374 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:1352 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -#: ../../source/ref-changelog.md:1375 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:1354 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-changelog.md:1376 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:1356 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-changelog.md:1377 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:1358 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-changelog.md:1381 +#: ../../source/ref-changelog.md:1360 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." 
+"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -#: ../../source/ref-changelog.md:1382 +#: ../../source/ref-changelog.md:1362 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-changelog.md:1383 +#: ../../source/ref-changelog.md:1364 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1368 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-changelog.md:1385 +#: ../../source/ref-changelog.md:1369 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/ref-changelog.md:1370 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:1371 msgid "" -"Flower comes with a number of usage examples. 
The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-example-projects.rst:9 -msgid "The following examples are available as standalone projects." +#: ../../source/ref-changelog.md:1372 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-example-projects.rst:12 -#, fuzzy -msgid "Quickstart TensorFlow/Keras" -msgstr "빠른 시작 튜토리얼" - -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:1373 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:1374 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/ref-example-projects.rst:19 -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:1378 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-example-projects.rst:20 +#: ../../source/ref-changelog.md:1380 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. 
The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/ref-example-projects.rst:24 -#: ../../source/tutorial-quickstart-pytorch.rst:4 -msgid "Quickstart PyTorch" +#: ../../source/ref-changelog.md:1382 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-changelog.md:1386 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:1388 msgid "" -"`Quickstart PyTorch (Code) " -"`_" -msgstr "" - -#: ../../source/ref-example-projects.rst:31 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" -msgstr "" - -#: ../../source/ref-example-projects.rst:34 -msgid "PyTorch: From Centralized To Federated" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-changelog.md:1390 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. 
However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:1392 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-example-projects.rst:40 +#: ../../source/ref-changelog.md:1394 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" -msgstr "" - -#: ../../source/ref-example-projects.rst:44 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:1395 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-example-projects.rst:49 +#: ../../source/ref-changelog.md:1397 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-example-projects.rst:51 +#: ../../source/ref-changelog.md:1399 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:1401 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." 
+"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:1403 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-faq.rst:9 +#: ../../source/ref-changelog.md:1407 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:1408 msgid "" -"`Flower simulation PyTorch " -"`_" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-faq.rst:12 +#: ../../source/ref-changelog.md:1409 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:1410 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-faq.rst:16 +#: ../../source/ref-changelog.md:1411 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" 
+#: ../../source/ref-changelog.md:1415 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-faq.rst:20 +#: ../../source/ref-changelog.md:1417 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-changelog.md:1419 msgid "" -"`Android Kotlin example `_" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-faq.rst:23 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:1421 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:1423 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-faq.rst:27 +#: ../../source/ref-changelog.md:1425 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"This example has been replaced by a new example. 
The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/ref-faq.rst:30 -msgid "`FLock: A Decentralised AI Training Platform `_." +#: ../../source/ref-changelog.md:1427 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-faq.rst:30 -msgid "Contribute to on-chain training the model and earn rewards." +#: ../../source/ref-changelog.md:1431 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-faq.rst:31 -msgid "Local blockchain with federated learning simulation." +#: ../../source/ref-changelog.md:1433 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:1436 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-changelog.md:1438 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-faq.rst:34 +#: ../../source/ref-changelog.md:1440 msgid "" -"`Flower meets KOSMoS `_." 
+"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-changelog.md:1442 msgid "" -"`Flower meets Talan blog post `_ ." +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-faq.rst:36 +#: ../../source/ref-changelog.md:1444 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/ref-changelog.md:1446 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:1448 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." 
msgstr "" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:1450 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/ref-changelog.md:1452 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:1454 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:1458 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:1460 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:1462 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:1464 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +#: ../../source/ref-changelog.md:1466 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:1470 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:1472 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +#: ../../source/ref-changelog.md:1474 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/ref-changelog.md:1493 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:1497 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." 
+"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:1499 +msgid "v0.14.0 (2021-02-18)" +msgstr "" + +#: ../../source/ref-changelog.md:1503 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:1505 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:1507 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." 
msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:1509 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:1511 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:1526 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." 
+"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:1528 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:1530 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:1532 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." 
+"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/ref-changelog.md:1549 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:1553 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-telemetry.md:58 -msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +#: ../../source/ref-changelog.md:1554 +msgid "Improved documentation" msgstr "" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +#: ../../source/ref-changelog.md:1555 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/ref-telemetry.md:66 -msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +#: ../../source/ref-changelog.md:1556 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:1557 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." 
+"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:4 -msgid "Quickstart Android" +#: ../../source/ref-changelog.md:1558 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:9 -msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +#: ../../source/ref-changelog.md:1560 +msgid "Bugfix:" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:11 +#: ../../source/ref-changelog.md:1562 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:4 -msgid "Quickstart fastai" +#: ../../source/ref-changelog.md:1564 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:6 -msgid "" -"In this federated learning tutorial we will learn how to train a " -"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `." 
+#: ../../source/ref-changelog.md:1566 ../../source/ref-changelog.md:1582 +msgid "Important changes:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 -msgid "Then, clone the code example directly from GitHub:" +#: ../../source/ref-changelog.md:1568 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:18 +#: ../../source/ref-changelog.md:1569 msgid "" -"This will create a new directory called `quickstart-fastai` containing " -"the following files:" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:31 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 -#, fuzzy -msgid "Next, activate your environment, then run:" -msgstr "그 후 가상 환경을 활성화합니다:" - -#: ../../source/tutorial-quickstart-fastai.rst:41 +#: ../../source/ref-changelog.md:1570 msgid "" -"This example by default runs the Flower Simulation Engine, creating a " -"federation of 10 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." 
-" Let's run the project:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:54 -#: ../../source/tutorial-quickstart-huggingface.rst:61 -#: ../../source/tutorial-quickstart-mlx.rst:60 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 -#: ../../source/tutorial-quickstart-pytorch.rst:62 -#: ../../source/tutorial-quickstart-tensorflow.rst:62 -msgid "With default arguments you will see an output like this one:" +#: ../../source/ref-changelog.md:1572 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:98 -#: ../../source/tutorial-quickstart-huggingface.rst:112 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 -#: ../../source/tutorial-quickstart-pytorch.rst:103 -#: ../../source/tutorial-quickstart-tensorflow.rst:103 -msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +#: ../../source/ref-changelog.md:1574 +msgid "Incompatible changes:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:108 +#: ../../source/ref-changelog.md:1576 msgid "" -"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " -"in the Flower GitHub repository." +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 -msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +#: ../../source/ref-changelog.md:1577 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:4 -msgid "Quickstart 🤗 Transformers" +#: ../../source/ref-changelog.md:1578 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:6 -msgid "" -"In this federated learning tutorial we will learn how to train a large " -"language model (LLM) on the `IMDB " -"`_ dataset using Flower" -" and the 🤗 Hugging Face Transformers library. It is recommended to create" -" a virtual environment and run everything within a :doc:`virtualenv " -"`." +#: ../../source/ref-changelog.md:1579 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 -msgid "" -"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " -"project. It will generate all the files needed to run, by default with " -"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " -"The dataset will be partitioned using |flowerdatasets|_'s " -"|iidpartitioner|_." +#: ../../source/ref-changelog.md:1580 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:17 -#: ../../source/tutorial-quickstart-mlx.rst:17 -#: ../../source/tutorial-quickstart-pytorch.rst:18 -#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#: ../../source/ref-changelog.md:1584 msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). 
To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:25 +#: ../../source/ref-changelog.md:1585 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``HuggingFace``), give a name to your " -"project, and type in your developer name:" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -#: ../../source/tutorial-quickstart-mlx.rst:32 -#: ../../source/tutorial-quickstart-pytorch.rst:34 -#: ../../source/tutorial-quickstart-tensorflow.rst:34 +#: ../../source/ref-changelog.md:1586 msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. It should have the following structure:" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:47 -#: ../../source/tutorial-quickstart-mlx.rst:46 -#: ../../source/tutorial-quickstart-pytorch.rst:48 -#: ../../source/tutorial-quickstart-tensorflow.rst:48 +#: ../../source/ref-changelog.md:1587 msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:54 -#: ../../source/tutorial-quickstart-pytorch.rst:55 -#: ../../source/tutorial-quickstart-tensorflow.rst:55 -msgid "To run the project, do:" +#: ../../source/ref-changelog.md:1588 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:102 -msgid "You can also run the project with GPU as follows:" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:109 +#: ../../source/ref-example-projects.rst:4 msgid "" -"This will use the default arguments where each ``ClientApp`` will use 2 " -"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:120 -#: ../../source/tutorial-quickstart-mlx.rst:110 -#: ../../source/tutorial-quickstart-pytorch.rst:111 -msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:115 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#: ../../source/ref-example-projects.rst:12 #, fuzzy -msgid "The Data" -msgstr "Metadata" +msgid "Quickstart TensorFlow/Keras" +msgstr "빠른 시작 튜토리얼" -#: ../../source/tutorial-quickstart-huggingface.rst:126 +#: ../../source/ref-example-projects.rst:14 msgid "" -"This tutorial uses |flowerdatasets|_ to easily download and partition the" -" `IMDB `_ dataset. In " -"this example you'll make use of the |iidpartitioner|_ to generate " -"``num_partitions`` partitions. You can choose |otherpartitioners|_ " -"available in Flower Datasets. 
To tokenize the text, we will also load the" -" tokenizer from the pre-trained Transformer model that we'll use during " -"training - more on that in the next section. Each ``ClientApp`` will call" -" this function to create dataloaders with the data that correspond to " -"their data partition." +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:171 -#: ../../source/tutorial-quickstart-mlx.rst:155 -#: ../../source/tutorial-quickstart-pytorch.rst:150 -#: ../../source/tutorial-quickstart-tensorflow.rst:139 -msgid "The Model" +#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:173 +#: ../../source/ref-example-projects.rst:19 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:20 msgid "" -"We will leverage 🤗 Hugging Face to federate the training of language " -"models over multiple clients using Flower. More specifically, we will " -"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " -"classification over the dataset of IMDB ratings. The end goal is to " -"detect if a movie rating is positive or negative. If you have access to " -"larger GPUs, feel free to use larger models!" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:185 +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "" + +#: ../../source/ref-example-projects.rst:26 msgid "" -"Note that here, ``model_name`` is a string that will be loaded from the " -"``Context`` in the ClientApp and ServerApp." 
+"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:188 +#: ../../source/ref-example-projects.rst:29 msgid "" -"In addition to loading the pretrained model weights and architecture, we " -"also include two utility functions to perform both training (i.e. " -"``train()``) and evaluation (i.e. ``test()``) using the above model. " -"These functions should look fairly familiar if you have some prior " -"experience with PyTorch. Note these functions do not have anything " -"specific to Flower. That being said, the training function will normally " -"be called, as we'll see later, from a Flower client passing its own data." -" In summary, your clients can use standard training/testing functions to " -"perform local training or evaluation:" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:228 -#: ../../source/tutorial-quickstart-mlx.rst:199 -#: ../../source/tutorial-quickstart-pytorch.rst:224 -#: ../../source/tutorial-quickstart-tensorflow.rst:168 -#, fuzzy -msgid "The ClientApp" -msgstr "클라이언트앱" +#: ../../source/ref-example-projects.rst:31 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:230 -msgid "" -"The main changes we have to make to use 🤗 Hugging Face with Flower will " -"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " -"the hood, the ``transformers`` library uses PyTorch, which means we can " -"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" -" the :doc:`Quickstart PyTorch ` tutorial. As" -" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" -" and represented as a list of NumPy arrays. The ``set_weights()`` " -"function that's the opposite: given a list of NumPy arrays it applies " -"them to an existing PyTorch model. 
Doing this in fairly easy in PyTorch." +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:241 -#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/ref-example-projects.rst:36 msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:257 -#: ../../source/tutorial-quickstart-pytorch.rst:250 +#: ../../source/ref-example-projects.rst:38 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:283 +#: ../../source/ref-example-projects.rst:40 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. You could define " -"additional hyperparameters in ``pyproject.toml`` and access them here." 
+":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:316 -#: ../../source/tutorial-quickstart-mlx.rst:361 -#: ../../source/tutorial-quickstart-pytorch.rst:307 -#: ../../source/tutorial-quickstart-tensorflow.rst:232 -#, fuzzy -msgid "The ServerApp" -msgstr "Flower 서버앱" +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:318 +#: ../../source/ref-example-projects.rst:46 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"|serverappcomponents|_ as opposed to a |client|_ In this example we use " -"the `FedAvg` strategy. To it we pass a randomly initialized model that " -"will server as the global model to federated. Note that the value of " -"``fraction_fit`` is read from the run config. You can find the default " -"value defined in the ``pyproject.toml``." +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:356 +#: ../../source/ref-example-projects.rst:49 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system for an LLM." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:361 +#: ../../source/ref-example-projects.rst:51 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_hf_link|_ in the Flower GitHub repository. For a " -"comprehensive example of a federated fine-tuning of an LLM with Flower, " -"refer to the |flowertune|_ example in the Flower GitHub repository." 
+"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "자주 묻는 질문" + +#: ../../source/ref-faq.rst:4 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:4 -msgid "Quickstart iOS" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:9 +#: ../../source/ref-faq.rst:9 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-faq.rst:11 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-faq.rst:12 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:20 -msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. 
" -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:26 +#: ../../source/ref-faq.rst:16 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:33 -msgid "Or Poetry:" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:40 -#: ../../source/tutorial-quickstart-scikitlearn.rst:43 -#: ../../source/tutorial-quickstart-xgboost.rst:65 -msgid "Flower Client" +#: ../../source/ref-faq.rst:20 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:42 +#: ../../source/ref-faq.rst:22 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"`Android Kotlin example `_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:80 -msgid "" -"Let's create a new application project in Xcode and add ``flwr`` as a " -"dependency in your project. For our application, we will store the logic " -"of our app in ``FLiOSModel.swift`` and the UI elements in " -"``ContentView.swift``. 
We will focus more on ``FLiOSModel.swift`` in this" -" quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:86 -msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:94 +#: ../../source/ref-faq.rst:27 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" -" ``MLBatchProvider`` object. The preprocessing is done inside " -"``DataLoader.swift``." +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:112 -msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -"``MLModelInspect``." +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:118 -msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:133 -msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function ``startFlwrGRPC``." +#: ../../source/ref-faq.rst:31 +msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:141 +#: ../../source/ref-faq.rst:32 msgid "" -"That's it for the client. We only have to implement ``Client`` or call " -"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" -" ``hostname`` and ``port`` tells the client which server to connect to. " -"This can be done by entering the hostname and port in the application " -"before clicking the start button to start the federated learning process." +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:148 -#: ../../source/tutorial-quickstart-scikitlearn.rst:179 -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "Flower Server" +#: ../../source/ref-faq.rst:33 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:150 +#: ../../source/ref-faq.rst:34 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -"``server.py``, import Flower and start the server:" +"`Flower meets KOSMoS `_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:254 -msgid "Train the model, federated!" +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:163 -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-faq.rst:36 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. 
We therefore have to start the server first:" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:171 -msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:177 +#: ../../source/ref-telemetry.md:3 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in ``examples/ios``." +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-telemetry.md:5 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:4 -msgid "Quickstart JAX" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:9 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. 
We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -"이 튜토리얼에서는 Flower를 사용하여 기존 JAX 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. JAX를 사용해 " -"scikit-learn 데이터 세트에서 선형 회귀 모델을 훈련하고 있습니다. 예제는 '파이토치 - Centralized에서 " -"Federated으로 `_ 워크스루와 유사하게 구성하겠습니다. 먼저, `JAX를 사용한 선형 회귀 " -"`_" -" 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 다음 centralized 트레이닝 코드를 기반으로" -" federated 방식으로 트레이닝을 실행합니다." -#: ../../source/tutorial-quickstart-jax.rst:20 -#, fuzzy +#: ../../source/ref-telemetry.md:11 msgid "" -"Before we start building our JAX example, we need install the packages " -"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -"JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, " -":code:`flwr` 패키지를 설치해야 합니다:" - -#: ../../source/tutorial-quickstart-jax.rst:28 -msgid "Linear Regression with JAX" -msgstr "JAX를 사용한 선형 회귀" -#: ../../source/tutorial-quickstart-jax.rst:30 -#, fuzzy +#: ../../source/ref-telemetry.md:12 msgid "" -"We begin with a brief description of the centralized training code based " -"on a ``Linear Regression`` model. If you want a more in-depth explanation" -" of what's going on then have a look at the official `JAX documentation " -"`_." +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." 
msgstr "" -"먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 간략한 설명부터 시작하겠습니다. 더 자세한 설명을" -" 원하시면 공식 `JAX 문서 `_를 참조하세요." -#: ../../source/tutorial-quickstart-jax.rst:34 -#, fuzzy +#: ../../source/ref-telemetry.md:13 msgid "" -"Let's create a new file called ``jax_training.py`` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " -"imported. In addition, we need to import ``sklearn`` since we use " -"``make_regression`` for the dataset and ``train_test_split`` to split the" -" dataset into a training and test set. You can see that we do not yet " -"import the ``flwr`` package for federated learning. This will be done " -"later." +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -"전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 :code:`jax_training.py`라는 새 파일을 " -"생성해 보겠습니다. 먼저, JAX 패키지인 :code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 데이터 세트에" -" :code:`make_regression`을 사용하고 데이터 세트를 학습 및 테스트 세트로 분할하기 위해 " -":code:`train_test_split`을 사용하므로 :code:`sklearn`을 가져와야 합니다. 연합 학습을 위해 아직 " -":code:`flwr` 패키지를 가져오지 않은 것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." - -#: ../../source/tutorial-quickstart-jax.rst:51 -#, fuzzy -msgid "The ``load_data()`` function loads the mentioned training and test sets." -msgstr "code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." -#: ../../source/tutorial-quickstart-jax.rst:63 -#, fuzzy +#: ../../source/ref-telemetry.md:14 msgid "" -"The model architecture (a very simple ``Linear Regression`` model) is " -"defined in ``load_model()``." -msgstr "모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정의되어 있습니다." +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." 
+msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:73 -#, fuzzy -msgid "" -"We now need to define the training (function ``train()``), which loops " -"over the training set and measures the loss (function ``loss_fn()``) for " -"each batch of training examples. The loss function is separate since JAX " -"takes derivatives with a ``grad()`` function (defined in the ``main()`` " -"function and called in ``train()``)." +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -"이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :code:`loss_fn()`) 훈련(함수 " -":code:`train()`)을 정의해야 합니다. JAX는 :code:`grad()` 함수(:code:`main()` 함수에 " -"정의되고 :code:`train()`에서 호출됨)로 파생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:95 -#, fuzzy +#: ../../source/ref-telemetry.md:18 msgid "" -"The evaluation of the model is defined in the function ``evaluation()``. " -"The function takes all test examples and measures the loss of the linear " -"regression model." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -"모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 예제를 가져와 선형 회귀 " -"모델의 손실을 측정합니다." -#: ../../source/tutorial-quickstart-jax.rst:107 -#, fuzzy +#: ../../source/ref-telemetry.md:24 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the ``jax.grad()`` function is defined in " -"``main()`` and passed to ``train()``." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." 
msgstr "" -"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하여 JAX를 사용 모델을 훈련할 수 있습니다. 이미" -" 언급했듯이 :code:`jax.grad()` 함수는 :code:`main()`에 정의되어 :code:`train()`에 " -"전달됩니다." -#: ../../source/tutorial-quickstart-jax.rst:126 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" - -#: ../../source/tutorial-quickstart-jax.rst:132 -msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" msgstr "" -"지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 " -"사용하여 하나의 서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/tutorial-quickstart-jax.rst:137 -msgid "JAX meets Flower" -msgstr "JAX와 Flower의 만남" +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:139 -#, fuzzy +#: ../../source/ref-telemetry.md:30 msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -"``jax_training.py`` for the *clients* that are connected to the *server*." -" The *server* sends model parameters to the clients. The *clients* run " -"the training and update the parameters. The updated parameters are sent " -"back to the *server*, which averages all received parameter updates. This" -" describes one round of the federated learning process, and we repeat " -"this for multiple rounds." +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." 
msgstr "" -"기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 " -":code:`jax_training.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. " -"클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 " -"업데이트의 평균을 구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 라운드에 걸쳐 반복합니다." -#: ../../source/tutorial-quickstart-jax.rst:167 -#, fuzzy +#: ../../source/ref-telemetry.md:32 msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined JAX training in ``jax_training.py``. Our " -"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " -"update the parameters on our JAX model:" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`jax_training.py`에서 " -"이전에 정의한 JAX 교육을 기반으로 빌드합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를" -" 업데이트하기 위해 :code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#: ../../source/tutorial-quickstart-jax.rst:182 -#, fuzzy +#: ../../source/ref-telemetry.md:34 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" -" than ``Client`` if you use a framework with good NumPy interoperability " -"(like JAX) because it avoids some of the boilerplate that would otherwise" -" be necessary. 
``FlowerClient`` needs to implement four methods, two " -"methods for getting/setting model parameters, one method for training the" -" model, and one method for testing the model:" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." msgstr "" -"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " -":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 구현은 " -":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`FlowerClient`라고 부를 " -"것입니다. :code:`NumPyClient`는 필요한 일부 보일러플레이를 피할 수 있기 때문에 NumPy 상호 운용성이 좋은 " -"프레임워크(예: JAX)를 사용하는 경우 :code:`Client`보다 구현하기가 약간 더 쉽습니다. " -"code:`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 " -"테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 합니다:" - -#: ../../source/tutorial-quickstart-jax.rst:194 -#, fuzzy -msgid "``set_parameters (optional)``" -msgstr ":code:`set_parameters (선택사항)`" - -#: ../../source/tutorial-quickstart-jax.rst:193 -#, fuzzy -msgid "transform parameters to NumPy ``ndarray``'s" -msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" - -#: ../../source/tutorial-quickstart-jax.rst:203 -msgid "get the updated local model parameters and return them to the server" -msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" -#: ../../source/tutorial-quickstart-jax.rst:208 -msgid "return the local loss to the server" -msgstr "로컬 손실을 서버로 반환합니다" - -#: ../../source/tutorial-quickstart-jax.rst:210 -#, fuzzy +#: ../../source/ref-telemetry.md:36 msgid "" -"The challenging part is to transform the JAX model parameters from " -"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " -"`NumPyClient`." +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." 
msgstr "" -"어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy ndarray`로 변환하여 " -"`NumPyClient`와 호환되도록 하는 것입니다." -#: ../../source/tutorial-quickstart-jax.rst:213 -#, fuzzy +#: ../../source/ref-telemetry.md:38 msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``evaluate()`` previously defined in " -"``jax_training.py``. So what we really do here is we tell Flower through " -"our ``NumPyClient`` subclass which of our already defined functions to " -"call for training and evaluation. We included type annotations to give " -"you a better understanding of the data types that get passed around." +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " -":code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`를 " -"사용합니다. 따라서 여기서 우리가 실제로 하는 일은 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " -":code:`NumPyClient` 서브클래스를 통해 Flower에게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 " -"있도록 유형 type annotation을 포함했습니다." - -#: ../../source/tutorial-quickstart-jax.rst:286 -msgid "Having defined the federation process, we can run it." -msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:315 +#: ../../source/ref-telemetry.md:40 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -"를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 연합 학습을 실행하는 JAX 프로젝트를 확인합니다. " -"축하합니다!" 
-#: ../../source/tutorial-quickstart-jax.rst:321 +#: ../../source/ref-telemetry.md:42 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -"이 예제의 소스 코드는 시간이 지남에 따라 개선되었으며 여기에서 확인할 수 있습니다: 'Quickstart JAX " -"`_. 두 " -"클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예제는 다소 단순화되어 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:325 +#: ../../source/ref-telemetry.md:44 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -"이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거나 다른 데이터 집합을 사용해 보는 것은 어떨까요? " -"클라이언트를 더 추가하는 것은 어떨까요?" -#: ../../source/tutorial-quickstart-mlx.rst:4 -#, fuzzy -msgid "Quickstart MLX" -msgstr "빠른 시작" - -#: ../../source/tutorial-quickstart-mlx.rst:6 -msgid "" -"In this federated learning tutorial we will learn how to train simple MLP" -" on MNIST using Flower and MLX. It is recommended to create a virtual " -"environment and run everything within a :doc:`virtualenv `." 
-msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:10 +#: ../../source/ref-telemetry.md:46 msgid "" -"Let's use `flwr new` to create a complete Flower+MLX project. It will " -"generate all the files needed to run, by default with the Simulation " -"Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:25 +#: ../../source/ref-telemetry.md:48 msgid "" -"Then, run the command below. You will be prompted to select of the " -"available templates (choose ``MLX``), give a name to your project, and " -"type in your developer name:" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:53 -msgid "To run the project do:" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:102 +#: ../../source/ref-telemetry.md:52 msgid "" -"You can also override the parameters defined in " -"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. 
You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:116 +#: ../../source/ref-telemetry.md:58 msgid "" -"We will use `Flower Datasets `_ to " -"easily download and partition the `MNIST` dataset. In this example you'll" -" make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets:" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:157 -msgid "" -"We define the model as in the `centralized MLX example " -"`_, it's a " -"simple MLP:" +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:180 +#: ../../source/ref-telemetry.md:66 msgid "" -"We also define some utility functions to test our model and to iterate " -"over batches." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:201 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"The main changes we have to make to use `MLX` with `Flower` will be found" -" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " -"doesn't provide an easy way to convert the model parameters into a list " -"of ``np.array`` objects (the format we need for the serialization of the " -"messages to work)." +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:206 -msgid "The way MLX stores its parameters is as follows:" +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:219 +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" -"Therefore, to get our list of ``np.array`` objects, we need to extract " -"each array and convert them into a NumPy array:" +"The experimental Flower Android SDK is not compatible with the latest " +"version of Flower. Android support is currently being reworked and will " +"be released in 2025." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:228 +#: ../../source/tutorial-quickstart-android.rst:14 msgid "" -"For the ``set_params()`` function, we perform the reverse operation. We " -"receive a list of NumPy arrays and want to convert them into MLX " -"parameters. Therefore, we iterate through pairs of parameters and assign " -"them to the `weight` and `bias` keys of each layer dict:" +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new Android SDK is released." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:243 +#: ../../source/tutorial-quickstart-android.rst:17 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset:" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:259 +#: ../../source/tutorial-quickstart-android.rst:19 msgid "" -"Here, after updating the parameters, we perform the training as in the " -"centralized case, and return the new parameters." +"Please refer to the `full code example " +"`_ to learn " +"more." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:262 -msgid "And for the ``evaluate()`` method of the client:" +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/tutorial-quickstart-fastai.rst:6 msgid "" -"We also begin by updating the parameters with the ones sent by the " -"server, and then we compute the loss and accuracy using the functions " -"defined above. In the constructor of the ``FlowerClient`` we instantiate " -"the `MLP` model as well as other components such as the optimizer." +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:277 -msgid "Putting everything together we have:" +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:331 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that " -"``context`` enables you to get access to hyperparemeters defined in " -"``pyproject.toml`` to configure the run. In this tutorial we access, " -"among other hyperparameters, the ``local-epochs`` setting to control the " -"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " -"method." 
+"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:363 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "그 후 가상 환경을 활성화합니다:" + +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" -"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " -"an identical signature to that of ``client_fn()``, but the return type is" -" `ServerAppComponents `_ as " -"opposed to `Client `_. In this example we use the " -"``FedAvg`` strategy." +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:386 -#: ../../source/tutorial-quickstart-pytorch.rst:344 -#: ../../source/tutorial-quickstart-tensorflow.rst:266 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system." 
+#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-jax.rst:60 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-scikitlearn.rst:59 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +#: ../../source/tutorial-quickstart-xgboost.rst:492 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:390 +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-jax.rst:102 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-scikitlearn.rst:101 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +#: ../../source/tutorial-quickstart-xgboost.rst:537 msgid "" -"Check the `source code `_ of the extended version of this tutorial in ``examples" -"/quickstart-mlx`` in the Flower GitHub repository." +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/tutorial-quickstart-fastai.rst:108 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:4 -msgid "Quickstart Pandas" +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." 
msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:9 -msgid "Let's build a federated analytics system using Pandas and Flower!" +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:11 +#: ../../source/tutorial-quickstart-huggingface.rst:6 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:6 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 msgid "" -"In this federated learning tutorial we will learn how to train a " -"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." +"Now that we have a rough idea of what this example is about, let's get " +"started. 
First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:11 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" -"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" -" generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:26 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-jax.rst:32 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:31 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``PyTorch``), give a name to your project, " -"and type in your developer name:" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:117 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-jax.rst:46 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. 
You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create dataloaders with the data" -" that correspond to their data partition." +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:152 +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +#: ../../source/tutorial-quickstart-xgboost.rst:485 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" -"We defined a simple Convolutional Neural Network (CNN), but feel free to " -"replace it with a more sophisticated model if you'd like:" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:177 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-jax.rst:110 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 msgid "" -"In addition to defining the model architecture, we also include two " -"utility functions to perform both training (i.e. ``train()``) and " -"evaluation (i.e. ``test()``) using the above model. These functions " -"should look fairly familiar if you have some prior experience with " -"PyTorch. Note these functions do not have anything specific to Flower. 
" -"That being said, the training function will normally be called, as we'll " -"see later, from a Flower client passing its own data. In summary, your " -"clients can use standard training/testing functions to perform local " -"training or evaluation:" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-jax.rst:114 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#: ../../source/tutorial-quickstart-xgboost.rst:89 +#, fuzzy +msgid "The Data" +msgstr "Metadata" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" -"The main changes we have to make to use `PyTorch` with `Flower` will be " -"found in the ``get_weights()`` and ``set_weights()`` functions. In " -"``get_weights()`` PyTorch model parameters are extracted and represented " -"as a list of NumPy arrays. The ``set_weights()`` function that's the " -"oposite: given a list of NumPy arrays it applies them to an existing " -"PyTorch model. Doing this in fairly easy in PyTorch." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:282 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-jax.rst:128 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-scikitlearn.rst:138 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +msgid "The Model" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"`local-epochs` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. You could define " -"additioinal hyperparameters in ``pyproject.toml`` and access them here." +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:309 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will server as " -"the global model to federated. Note that the value of ``fraction_fit`` is" -" read from the run config. 
You can find the default value defined in the " -"``pyproject.toml``." +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:348 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" -"Check the `source code `_ of the extended version of this tutorial in " -"``examples/quickstart-pytorch`` in the Flower GitHub repository." +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:354 -#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#: ../../source/tutorial-quickstart-xgboost.rst:149 #, fuzzy -msgid "Video tutorial" -msgstr "튜토리얼" +msgid "The ClientApp" +msgstr "클라이언트앱" -#: ../../source/tutorial-quickstart-pytorch.rst:358 +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" -"The video shown below shows how to setup a PyTorch + Flower project using" -" our previously recommended APIs. 
A new video tutorial will be released " -"that shows the new APIs (as the content above does)" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 -msgid "Quickstart PyTorch Lightning" +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" -"In this federated learning tutorial we will learn how to train an " -"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. 
Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" -"This will create a new directory called `quickstart-pytorch-lightning` " -"containing the following files:" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-jax.rst:246 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-scikitlearn.rst:255 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#: ../../source/tutorial-quickstart-xgboost.rst:269 +#, fuzzy +msgid "The ServerApp" +msgstr "Flower 서버앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" -"By default, Flower Simulation Engine will be started and it will create a" -" federation of 4 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." -" To run the project, do:" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. 
To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" -"Each simulated `ClientApp` (two per round) will also log a summary of " -"their local training process. Expect this output to be similar to:" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" -"Check the `source code `_ of this tutorial in ``examples" -"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:4 -msgid "Quickstart scikit-learn" +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#: ../../source/tutorial-quickstart-ios.rst:11 msgid "" -"In this tutorial, we will learn how to train a ``Logistic Regression`` " -"model on MNIST using Flower and scikit-learn." 
+"The experimental Flower iOS SDK is not compatible with the latest version" +" of Flower. iOS support is currently being reworked and will be released " +"in 2025." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/tutorial-quickstart-ios.rst:14 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new iOS SDK is released." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +#: ../../source/tutorial-quickstart-ios.rst:20 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +#: ../../source/tutorial-quickstart-ios.rst:25 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:30 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#: ../../source/tutorial-quickstart-ios.rst:28 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:36 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/tutorial-quickstart-ios.rst:34 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within ``utils.py``. The " -"``utils.py`` contains different functions defining all the machine " -"learning basics:" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -#, fuzzy -msgid "``get_model_parameters()``" -msgstr "모델 매개변수." 
- -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +#: ../../source/tutorial-quickstart-ios.rst:41 +msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -msgid "``set_model_params()``" +#: ../../source/tutorial-quickstart-ios.rst:48 +msgid "Flower Client" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:54 -msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +#: ../../source/tutorial-quickstart-ios.rst:50 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "``set_initial_params()``" +#: ../../source/tutorial-quickstart-ios.rst:88 +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#: ../../source/tutorial-quickstart-ios.rst:102 msgid "" -"Please check out ``utils.py`` `here " -"`_ for more details. The pre-defined functions are used in" -" the ``client.py`` and imported. 
The ``client.py`` also requires to " -"import several packages such as Flower and scikit-learn:" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +#: ../../source/tutorial-quickstart-ios.rst:120 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -"``FederatedDataset.load_partition()`` method loads the partitioned " -"training set for each partition ID defined in the ``--partition-id`` " -"argument." +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#: ../../source/tutorial-quickstart-ios.rst:126 msgid "" -"Next, the logistic regression model is defined and initialized with " -"``utils.set_initial_params()``." +"After we have all of the necessary information, let's create our Flower " +"client." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"The Flower server interacts with clients through an interface called " -"``Client``. When the server selects a particular client for training, it " -"sends training instructions over the network. The client receives those " -"instructions and calls one of the ``Client`` methods to run your code " -"(i.e., to fit the logistic regression we defined earlier)." +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-ios.rst:149 msgid "" -"Flower provides a convenience class called ``NumPyClient`` which makes it" -" easier to implement the ``Client`` interface when your workload uses " -"scikit-learn. Implementing ``NumPyClient`` usually means defining the " -"following methods (``set_parameters`` is optional though):" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "Flower Server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 -#, fuzzy -msgid "``set_parameters`` (optional)" -msgstr ":code:`set_parameters (선택사항)`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#: ../../source/tutorial-quickstart-ios.rst:158 msgid "" -"update the local model weights with the parameters received from the " -"server" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:133 -msgid "is directly imported with ``utils.set_model_params()``" +#: ../../source/tutorial-quickstart-ios.rst:169 +msgid "Train the model, federated!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:135 -msgid "set the local model weights" +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "train the local model" +#: ../../source/tutorial-quickstart-ios.rst:179 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:137 -#, fuzzy -msgid "return the updated local model weights" -msgstr "현재 로컬 모델 파라미터를 반환합니다." 
+#: ../../source/tutorial-quickstart-ios.rst:185 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -msgid "test the local model" +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:141 -msgid "The methods can be implemented in the following way:" +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#: ../../source/tutorial-quickstart-jax.rst:6 msgid "" -"We can now create an instance of our class ``MnistClient`` and add one " -"line to actually run this client:" +"In this federated learning tutorial we will learn how to train a linear " +"regression model using Flower and `JAX " +"`_. It is recommended to create a " +"virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#: ../../source/tutorial-quickstart-jax.rst:11 msgid "" -"That's it for the client. We only have to implement ``Client`` or " -"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" -" client of type ``NumPyClient`` you'll need to first call its " -"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " -"which server to connect to. In our case we can run the server and the " -"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " -"run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" pass to the client." 
+"Let's use ``flwr new`` to create a complete Flower+JAX project. It will " +"generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_. A random " +"regression dataset will be loaded from scikit-learn's |makeregression|_ " +"function." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:24 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``JAX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -msgid "``server.py``, import Flower and start the server:" +#: ../../source/tutorial-quickstart-jax.rst:116 +msgid "" +"This tutorial uses scikit-learn's |makeregression|_ function to generate " +"a random regression problem." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#: ../../source/tutorial-quickstart-jax.rst:130 msgid "" -"The number of federated learning rounds is set in ``fit_round()`` and the" -" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " -"is called after each federated learning round and gives you information " -"about loss and accuracy. Note that we also make use of Flower Datasets " -"here to load the test split of the MNIST dataset for server-side " -"evaluation." 
+"We defined a simple linear regression model to demonstrate how to create " +"a JAX model, but feel free to replace it with a more sophisticated JAX " +"model if you'd like, (such as with NN-based `Flax " +"`_):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:141 msgid "" -"The ``main`` contains the server-side parameter initialization " -"``utils.set_initial_params()`` as well as the aggregation strategy " -"``fl.server.strategy:FedAvg()``. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``evaluation()``) using the above model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +#: ../../source/tutorial-quickstart-jax.rst:172 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"The main changes we have to make to use JAX with Flower will be found in " +"the ``get_params()`` and ``set_params()`` functions. In ``get_params()``," +" JAX model parameters are extracted and represented as a list of NumPy " +"arrays. The ``set_params()`` function is the opposite: given a list of " +"NumPy arrays it applies them to an existing JAX model." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:264 -#: ../../source/tutorial-quickstart-xgboost.rst:598 +#: ../../source/tutorial-quickstart-jax.rst:180 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"The ``get_params()`` and ``set_params()`` functions here are conceptually" +" similar to the ``get_weights()`` and ``set_weights()`` functions that we" +" defined in the :doc:`QuickStart PyTorch ` " +"tutorial." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:271 -#: ../../source/tutorial-quickstart-xgboost.rst:605 -msgid "Open another terminal and start the second client:" +#: ../../source/tutorial-quickstart-jax.rst:227 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:277 -#: ../../source/tutorial-quickstart-xgboost.rst:611 +#: ../../source/tutorial-quickstart-jax.rst:248 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the ``FedAvg`` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. 
Note that the value of " +"``input_dim`` is read from the run config. You can find the default value" +" defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#: ../../source/tutorial-quickstart-jax.rst:276 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in ``examples/sklearn-logreg-" -"mnist``." +"learning system for JAX with Flower!" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-quickstart-jax.rst:281 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a CNN model on CIFAR-10." +"Check the source code of the extended version of this tutorial in " +"|quickstart_jax_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:4 -msgid "Quickstart TensorFlow" -msgstr "" +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "빠른 시작" -#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#: ../../source/tutorial-quickstart-mlx.rst:6 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " -"all, it is recommended to create a virtual environment and run everything" -" within a :doc:`virtualenv `." +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:11 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" -"Let's use `flwr new` to create a complete Flower+TensorFlow project. 
It " -"will generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " "`_. The " "dataset will be partitioned using Flower Dataset's `IidPartitioner " @@ -23829,722 +23675,1097 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:26 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``TensorFlow``), give a name to your project," -" and type in your developer name:" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create the ``NumPy`` arrays that" -" correspond to their data partition." +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:141 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" -"Next, we need a model. 
We defined a simple Convolutional Neural Network " -"(CNN), but feel free to replace it with a more sophisticated model if " -"you'd like:" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:170 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" -"With `TensorFlow`, we can use the built-in ``get_weights()`` and " -"``set_weights()`` functions, which simplifies the implementation with " -"`Flower`. The rest of the functionality in the ClientApp is directly " -"inspired by the centralized case. The ``fit()`` method in the client " -"trains the model using the local dataset. Similarly, the ``evaluate()`` " -"method is used to evaluate the model received on a held-out validation " -"set that the client might have:" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:203 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparameters defined in your " -"``pyproject.toml`` to configure the run. For example, in this tutorial we" -" access the `local-epochs` setting to control the number of epochs a " -"``ClientApp`` will perform when running the ``fit()`` method, in addition" -" to `batch-size`. You could define additional hyperparameters in " -"``pyproject.toml`` and access them here." 
+"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:234 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will serve as " -"the global model to federate." +"We also define some utility functions to test our model and to iterate " +"over batches." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_tf_link|_ in the Flower GitHub repository." +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:282 -msgid "" -"The video shown below shows how to setup a TensorFlow + Flower project " -"using our previously recommended APIs. A new video tutorial will be " -"released that shows the new APIs (as the content above does)" +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:4 -msgid "Quickstart XGBoost" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:13 -msgid "Federated XGBoost" +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:15 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:21 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 -msgid "Why federated XGBoost?" 
+#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:31 -msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +#: ../../source/tutorial-quickstart-mlx.rst:277 +msgid "Putting everything together we have:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:46 -msgid "Environment Setup" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:48 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:51 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"Congratulations! You've successfully built and run your first federated " +"learning system." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-quickstart-mlx.rst:390 msgid "" -"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" -" go ahead and install ``xgboost``:" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:67 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:71 -msgid "" -"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " -"and other related functions:" +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:99 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:101 +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:115 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"In this example, we split the dataset into 30 partitions with uniform " -"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " -"partition for the given client based on ``partition_id``:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:135 +#: ../../source/tutorial-quickstart-pytorch.rst:6 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for ``xgboost`` package." +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:149 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" -"The functions of ``train_test_split`` and " -"``transform_dataset_to_dmatrix`` are defined as below:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:190 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" -"The ``num_local_round`` represents the number of iterations for local " -"tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " -"metric." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:195 -msgid "Flower client definition for XGBoost" +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:197 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define ``XgbClient`` class inherited from " -"``fl.client.Client``." +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:219 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" -"All required parameters defined above are passed to ``XgbClient``'s " -"constructor." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. 
In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:221 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" -"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " -"insides ``XgbClient`` class as follows." +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:236 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use ``get_parameters`` " -"and ``set_parameters`` to initialise model parameters for XGBoost. As a " -"result, let's return an empty tensor in ``get_parameters`` when it is " -"called by the server at the first round." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:278 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" -"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " -"first set of trees. 
From the second round, we load the global model sent " -"from server to new build Booster object, and then update model weights on" -" local training data with function ``local_boost`` as follows:" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:298 +#: ../../source/tutorial-quickstart-pytorch.rst:348 msgid "" -"Given ``num_local_round``, we update trees by calling " -"``bst_input.update`` method. After training, the last " -"``N=num_local_round`` trees will be extracted to send to the server." +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:330 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "튜토리얼" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" -"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" -" function to conduct evaluation on valid set. The AUC value will be " -"returned." +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:333 -msgid "" -"Now, we can create an instance of our class ``XgbClient`` and add one " -"line to actually run this client:" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:350 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 msgid "" -"That's it for the client. We only have to implement ``Client`` and call " -"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " -"client which server to connect to. In our case we can run the server and " -"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" -" run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" point the client at." +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:360 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:364 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" -"In a file named ``server.py``, import Flower and FedXgbBagging from " -"``flwr.server.strategy``." 
+"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:367 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:401 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 msgid "" -"We use two clients for this example. An ``evaluate_metrics_aggregation`` " -"function is defined to collect and wighted average the AUC values from " -"clients. The ``config_func`` function is to return the current FL round " -"number to client's ``fit()`` and ``evaluate()`` methods." +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:406 -msgid "Then, we start the server:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:418 -msgid "Tree-based bagging aggregation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:420 +#: ../../source/tutorial-quickstart-scikitlearn.rst:6 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"In this federated learning tutorial we will learn how to train a Logistic" +" Regression on MNIST using Flower and scikit-learn. 
It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:422 +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " -"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " -"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " -"``evaluate`` methods as follows:" +"Let's use ``flwr new`` to create a complete Flower+scikit-learn project. " +"It will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_ The dataset " +"will be partitioned using |flowerdatasets|_'s |iidpartitioner|_" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:519 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" -"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " -"trees by calling ``aggregate()`` function:" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``sklearn``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:579 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -"``_get_tree_nums``. Then, the fetched information will be aggregated. " -"After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `MNIST `_ dataset. In this" +" example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. 
Each ``ClientApp`` will call this function " +"to create dataloaders with the data that correspond to their data " +"partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:584 +#: ../../source/tutorial-quickstart-scikitlearn.rst:140 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." +"We define the |logisticregression|_ model from scikit-learn in the " +"``get_model()`` function:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:588 -msgid "Launch Federated XGBoost!" +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +msgid "" +"To perform the training and evaluation, we will make use of the " +"``.fit()`` and ``.score()`` methods available in the " +"``LogisticRegression`` class." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:664 +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in ``metrics_distributed``." -" One can see that the average AUC increases over FL rounds." +"The main changes we have to make to use scikit-learn with Flower will be " +"found in the ``get_model_params()``, ``set_model_params()``, and " +"``set_initial_params()`` functions. In ``get_model_params()``, the " +"coefficients and intercept of the logistic regression model are extracted" +" and represented as a list of NumPy arrays. In ``set_model_params()``, " +"that's the opposite: given a list of NumPy arrays it applies them to an " +"existing ``LogisticRegression`` model. Finally, in " +"``set_initial_params()``, we initialize the model parameters based on the" +" MNIST dataset, which has 10 classes (corresponding to the 10 digits) and" +" 784 features (corresponding to the size of the MNIST image array, which " +"is 28 × 28). Doing this is fairly easy in scikit-learn." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:668 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"The full `source code `_ for this example can be found in ``examples" -"/xgboost-quickstart``." +"The rest of the functionality is directly inspired by the centralized " +"case:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:673 -msgid "Comprehensive Federated XGBoost" +#: ../../source/tutorial-quickstart-scikitlearn.rst:226 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"``context`` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:675 +#: ../../source/tutorial-quickstart-scikitlearn.rst:257 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. 
To it we pass a zero-initialized model that will " +"server as the global model to be federated. Note that the values of " +"``num-server-rounds``, ``penalty``, and ``local-epochs`` are read from " +"the run config. You can find the default values defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:685 -msgid "Cyclic training" +#: ../../source/tutorial-quickstart-scikitlearn.rst:295 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:687 +#: ../../source/tutorial-quickstart-scikitlearn.rst:300 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"Check the source code of the extended version of this tutorial in " +"|quickstart_sklearn_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 -msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:733 -msgid "" -"The customised ``ClientManager`` samples all available clients in each FL" -" round based on the order of connection to the server. Then, we define a " -"new strategy ``FedXgbCyclic`` in " -"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." 
+#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:775 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 msgid "" -"Unlike the original ``FedAvg``, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding ``aggregate_fit``." +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:778 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" -"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" -" ensure the clients to be sequentially selected given FL round:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:840 -msgid "Customised data partitioning" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:842 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" -"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " -"instantiate the data partitioner based on the given ``num_partitions`` " -"and ``partitioner_type``. Currently, we provide four supported " -"partitioner type to simulate the uniformity/non-uniformity in data " -"quantity (uniform, linear, square, exponential)." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:873 -msgid "Customised centralised/distributed evaluation" +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:875 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" -"To facilitate centralised evaluation, we define a function in " -"``server_utils.py``:" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:907 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" -"This function returns a evaluation function which instantiates a " -"``Booster`` object and loads the global model weights to it. The " -"evaluation is conducted by calling ``eval_set()`` method, and the tested " -"AUC value is reported." +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:911 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the ``evaluate()`` method insides the " -"``XgbClient`` class in ``client_utils.py``." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:916 -msgid "Flower simulation" +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:918 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 msgid "" -"We also provide an example code (``sim.py``) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:954 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" -"After importing all required packages, we define a ``main()`` function to" -" perform the simulation process:" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1010 +#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a ``list``. After the simulation begins, the " -"clients won't need to pre-process their partitions again." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1014 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1065 +#: ../../source/tutorial-quickstart-xgboost.rst:7 +msgid "XGBoost" +msgstr "XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:9 msgid "" -"After that, we start the simulation by calling " -"``fl.simulation.start_simulation``:" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1085 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" -"One of key parameters for ``start_simulation`` is ``client_fn`` which " -"returns a function to construct a client. We define it as follows:" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1126 -msgid "Arguments parser" -msgstr "" +#: ../../source/tutorial-quickstart-xgboost.rst:19 +#, fuzzy +msgid "Why Federated XGBoost?" +msgstr "연합 학습이란 무엇입니까?" -#: ../../source/tutorial-quickstart-xgboost.rst:1128 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" -"In ``utils.py``, we define the arguments parsers for clients, server and " -"simulation, allowing users to specify different experimental settings. " -"Let's first see the sever side:" +"As the demand for data privacy and decentralized learning grows, there's " +"an increasing requirement to implement federated XGBoost systems for " +"specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1175 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with ``--centralised-eval``, the sever " -"will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data privacy" +" is paramount. Given the robustness and efficiency of XGBoost, combining " +"it with federated learning offers a promising solution for these specific" +" challenges." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1180 -msgid "Then, the argument parser on client side:" +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1234 +#: ../../source/tutorial-quickstart-xgboost.rst:33 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting ``--centralised-eval``, as well as an option to perform " -"scaled learning rate based on the number of clients by setting " -"``--scaled-lr``." +"In this tutorial, we learn how to train a federated XGBoost model on the " +"HIGGS dataset using Flower and the ``xgboost`` package to perform a " +"binary classification task. We use a simple example (`full code xgboost-" +"quickstart `_) to demonstrate how federated XGBoost works, and then we " +"dive into a more complex comprehensive example (`full code xgboost-" +"comprehensive `_) to run various experiments." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1239 -msgid "We also have an argument parser for simulation:" +#: ../../source/tutorial-quickstart-xgboost.rst:42 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1317 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/tutorial-quickstart-xgboost.rst:45 +msgid "" +"We first need to install Flower and Flower Datasets. 
You can do this by " +"running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1320 -msgid "Example commands" +#: ../../source/tutorial-quickstart-xgboost.rst:52 +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1322 +#: ../../source/tutorial-quickstart-xgboost.rst:60 +#, fuzzy +msgid "The Configurations" +msgstr "구성 값" + +#: ../../source/tutorial-quickstart-xgboost.rst:62 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"We define all required configurations / hyper-parameters inside the " +"``pyproject.toml`` file:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1329 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/tutorial-quickstart-xgboost.rst:84 +msgid "" +"The ``local-epochs`` represents the number of iterations for local tree " +"boost. We use CPU for the training in default. One can assign it to a GPU" +" by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1335 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/tutorial-quickstart-xgboost.rst:91 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `HIGGS` dataset." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#: ../../source/tutorial-quickstart-xgboost.rst:105 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" ``examples/xgboost-comprehensive``." +"In this example, we split the dataset into 20 partitions with uniform " +"distribution (`IidPartitioner `_)." +" Then, we load the partition for the given client based on " +"``partition_id``." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/tutorial-quickstart-xgboost.rst:110 +msgid "" +"Subsequently, we train/test split using the given partition (client's " +"local data), and reformat data to DMatrix for the ``xgboost`` package." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/tutorial-quickstart-xgboost.rst:124 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and the Flower framework (`part 1 " -"`__) and we learned how strategies can be used to customize " -"the execution on both the server and the clients (`part 2 " -"`__)." +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-xgboost.rst:151 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg using " -"the Flower framework, Flower Datasets, and PyTorch." +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Let's first see how we define " +"Flower client for XGBoost. We follow the general rule to define " +"``FlowerClient`` class inherited from ``fl.client.Client``." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -#, fuzzy +#: ../../source/tutorial-quickstart-xgboost.rst:176 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Flower Discuss and the Flower Slack to connect, " -"ask questions, and get help: - `Join Flower Discuss " -"`__ We'd love to hear from you in the " -"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " -"Beginners``. - `Join Flower Slack `__ We'd " -"love to hear from you in the ``#introductions`` channel! If anything is " -"unclear, head over to the ``#questions`` channel." +"All required parameters defined above are passed to ``FlowerClient``'s " +"constructor." msgstr "" -"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" -" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " -"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 -msgid "Let's build a new ``Strategy`` from scratch! 🌼" +#: ../../source/tutorial-quickstart-xgboost.rst:178 +msgid "" +"Then, we override ``fit`` and ``evaluate`` methods insides " +"``FlowerClient`` class as follows." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 -msgid "Preparation" +#: ../../source/tutorial-quickstart-xgboost.rst:217 +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. 
From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``_local_boost`` as follows:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +#: ../../source/tutorial-quickstart-xgboost.rst:237 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 -msgid "Installing dependencies" +#: ../../source/tutorial-quickstart-xgboost.rst:265 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 -msgid "First, we install the necessary packages:" +#: ../../source/tutorial-quickstart-xgboost.rst:271 +msgid "" +"After the local training on clients, clients' model updates are sent to " +"the *server*, which aggregates them to produce a better model. Finally, " +"the *server* sends this improved model version back to each *client* to " +"complete a federated round." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +#: ../../source/tutorial-quickstart-xgboost.rst:275 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"In the file named ``server_app.py``, we define a strategy for XGBoost " +"bagging aggregation:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +#: ../../source/tutorial-quickstart-xgboost.rst:308 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"An ``evaluate_metrics_aggregation`` function is defined to collect and " +"wighted average the AUC values from clients. The ``config_func`` function" +" is to return the current FL round number to client's ``fit()`` and " +"``evaluate()`` methods." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 -msgid "Data loading" +#: ../../source/tutorial-quickstart-xgboost.rst:313 +msgid "Tree-based Bagging Aggregation" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +#: ../../source/tutorial-quickstart-xgboost.rst:315 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"You must be curious about how bagging aggregation works. Let's look into " +"the details." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 -msgid "Model training/evaluation" +#: ../../source/tutorial-quickstart-xgboost.rst:317 +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +#: ../../source/tutorial-quickstart-xgboost.rst:414 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 -msgid "Flower client" +#: ../../source/tutorial-quickstart-xgboost.rst:474 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#: ../../source/tutorial-quickstart-xgboost.rst:479 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``partition_id`` to the client and use it log additional details. We then" -" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
+"After traversal of all clients' models, a new global model is generated, " +"followed by serialisation, and sending the global model back to each " +"client." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +#: ../../source/tutorial-quickstart-xgboost.rst:483 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:533 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``History (metrics, " +"distributed, evaluate)``. One can see that the average AUC increases over" +" FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:547 +msgid "" +"Check the full `source code " +"`_ " +"for this example in ``examples/xgboost-quickstart`` in the Flower GitHub " +"repository." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:552 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:554 +msgid "" +"Now that you know how federated XGBoost works with Flower, it's time to " +"run some more comprehensive experiments by customising the experimental " +"settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised / distributed evaluation. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:562 +#, fuzzy +msgid "Cyclic Training" +msgstr "중앙 집중식 훈련" + +#: ../../source/tutorial-quickstart-xgboost.rst:564 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. 
The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:570 +msgid "To do this, we first customise a ``ClientManager`` in ``server_app.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:610 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to the " +"next client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:652 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:655 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Customised Data Partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In ``task.py``, we use the ``instantiate_fds`` function to instantiate " +"Flower Datasets and the data partitioner based on the given " +"``partitioner_type`` and ``num_partitions``. Currently, we provide four " +"supported partitioner type to simulate the uniformity/non-uniformity in " +"data quantity (uniform, linear, square, exponential)." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:726 +#, fuzzy +msgid "Customised Centralised / Distributed Evaluation" +msgstr "중앙 집중식 평가" + +#: ../../source/tutorial-quickstart-xgboost.rst:728 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_app.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:759 +msgid "" +"This function returns an evaluation function, which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:763 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_app.py``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:768 +#, fuzzy +msgid "Arguments Explainer" +msgstr "빌드 전달인자" + +#: ../../source/tutorial-quickstart-xgboost.rst:770 +msgid "" +"We define all hyper-parameters under ``[tool.flwr.app.config]`` entry in " +"``pyproject.toml``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:799 +msgid "" +"On the server side, we allow user to specify training strategies / FL " +"rounds / participating clients / clients for evaluation, and evaluation " +"fashion. Note that with ``centralised-eval = true``, the sever will do " +"centralised evaluation and all functionalities for client evaluation will" +" be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:804 +msgid "" +"On the client side, we can define various options for client data " +"partitioning. Besides, clients also have an option to conduct evaluation " +"on centralised test set by setting ``centralised-eval = true``, as well " +"as an option to perform scaled learning rate based on the number of " +"clients by setting ``scaled-lr = true``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:810 +#, fuzzy +msgid "Example Commands" +msgstr "예시" + +#: ../../source/tutorial-quickstart-xgboost.rst:812 +msgid "To run bagging aggregation for 5 rounds evaluated on centralised test set:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:818 +msgid "" +"To run cyclic training with linear partitioner type evaluated on " +"centralised test set:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:827 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive`` in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:833 +#, fuzzy +msgid "Video Tutorial" +msgstr "튜토리얼" + +#: ../../source/tutorial-quickstart-xgboost.rst:837 +msgid "" +"The video shown below shows how to setup a XGBoost + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. 
We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 msgid "Let's test what we have so far before we continue:" msgstr "" @@ -25401,7 +25622,6 @@ msgid "" msgstr "" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 msgid "Final remarks" msgstr "" @@ -25720,8 +25940,9 @@ msgstr "" "있을 것입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#, fuzzy msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"🧑‍🏫 This tutorial starts from zero and expects no familiarity with " "federated learning. Only a basic understanding of data science and Python" " programming is assumed." msgstr "" @@ -25746,12 +25967,14 @@ msgid "Let's get started!" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +#, fuzzy +msgid "Classical Machine Learning" msgstr "전통적인 머신러닝(기계학습)" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#, fuzzy msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " +"Before we begin discussing federated learning, let us quickly recap how " "most machine learning works today." msgstr "연합 학습에 대해 논의하기 전에 현재 대부분의 머신러닝이 어떻게 작동하는지 간략히 요약하겠습니다." @@ -25765,7 +25988,7 @@ msgstr "" " 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|ac0a9766e26044d6aea222a829859b20|" +msgid "|80152fa658904be08c849b4a594b76e1|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -25782,7 +26005,7 @@ msgstr "" " 바둑과 같은 게임을 하는 것일 수 있습니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgid "|35b60a1068f944ce937ac2988661aad5|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -25790,14 +26013,17 @@ msgid "Train model using data" msgstr "데이터를 이용한 모델 훈련" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#, fuzzy msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"In practice, the training data we work with doesn't originate on the " +"machine we train the model on." msgstr "실제로 우리가 사용하는 훈련 데이터는 모델을 훈련시키는 기계에서 비롯된 것이 아닙니다. 그 데이터는 다른 곳에서 만들어졌습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#, fuzzy msgid "" -"It originates on a smartphone by the user interacting with an app, a car " +"This data gets created \"somewhere else\". For instance, the data can " +"originate on a smartphone by the user interacting with an app, a car " "collecting sensor data, a laptop receiving input via the keyboard, or a " "smart speaker listening to someone trying to sing a song." msgstr "" @@ -25805,7 +26031,7 @@ msgstr "" "부르리는 것을 듣는 스마트 스피커에서 비롯됩니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgid "|efead7f2c2224b60b7b42705004c15e6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -25823,7 +26049,7 @@ msgstr "" "있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgid "|5421fee4e7ed450c903cbcd8a9d8a5d4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -25831,17 +26057,18 @@ msgid "Data is on many devices" msgstr "데이터가 여러 장치에 있습니다" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#, fuzzy msgid "" "So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"that has been used in the past was to collect all this data on a central " +"server. This server can be located somewhere in a data center, or " +"somewhere in the cloud." msgstr "" "따라서 머신러닝이나 어떤 종류의 데이터 분석을 이용하려면 과거에는 중앙 서버에서 모든 데이터를 수집하는 방법이 사용되었습니다. 이 " "서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgid "|811fcf35e9214bd5b4e613e41f7c0a27|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -25858,7 +26085,7 @@ msgstr "" " 우리가 기본적으로 사용해 온 머신러닝 방법입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgid "|e61d38b0948f4c07a7257755f3799b54|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -25870,17 +26097,18 @@ msgid "Challenges of classical machine learning" msgstr "클래식 머신러닝의 어려움" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#, fuzzy msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." 
+"This classical machine learning approach we've just seen can be used in " +"some cases. Great examples include categorizing holiday photos, or " +"analyzing web traffic. Cases, where all the data is naturally available " +"on a centralized server." msgstr "" "우리가 방금 본 전통적 머신러닝의 접근 방식은 경우에 따라 다르게 사용될 수 있습니다. 좋은 예로는 휴일 사진을 분류하거나 웹 " "트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|5405ed430e4746e28b083b146fb71731|" +msgid "|e82c29351e2e480087c61b939eb7c041|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -25897,7 +26125,7 @@ msgstr "" "좋은 모델을 훈련하기에 충분하지 않을 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|a389e87dab394eb48a8949aa2397687b|" +msgid "|21ca40f4fb1a405c89098fd1d24880a4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -25905,8 +26133,9 @@ msgid "Centralized impossible" msgstr "집중화 불가능" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#, fuzzy msgid "" -"There are many reasons why the classic centralized machine learning " +"There are many reasons why the classical centralized machine learning " "approach does not work for a large number of highly important real-world " "use cases. Those reasons include:" msgstr "" @@ -25914,6 +26143,7 @@ msgstr "" "다음과 같은 여러 가지가 있습니다:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#, fuzzy msgid "" "**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " "(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " @@ -25921,9 +26151,9 @@ msgid "" "(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " "protect sensitive data from being moved. 
In fact, those regulations " "sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"users' data for machine learning training because those users live in " +"different parts of the world, and their data is governed by different " +"data protection regulations." msgstr "" "**규정**: GDPR (유럽), CCPA (캘리포니아), PIPEDA (캐나다), LGPD (브라질), PDPL (아르헨티나), " "KVKK (터키), POPI (남아프리카공화국), FSS (러시아), CDPR (중국), PDPB (인도), PIPA (한국), " @@ -25965,23 +26195,27 @@ msgid "Examples where centralized machine learning does not work include:" msgstr "중앙 집중식 머신러닝이 작동하지 않는 예는 다음과 같습니다:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#, fuzzy msgid "" "Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"detection models." msgstr "여러 병원의 민감한 의료기록으로 암 검진 모델 훈련" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#, fuzzy msgid "" "Financial information from different organizations to detect financial " -"fraud" +"fraud." msgstr "금융 사기를 탐지하기 위한 다양한 조직의 금융 정보" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#, fuzzy +msgid "Location data from your electric car to make better range prediction." msgstr "더 나은 범위 예측을 위해 전기 자동차의 위치 데이터" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#, fuzzy +msgid "End-to-end encrypted messages to train better auto-complete models." msgstr "더 나은 자동 완성 모델을 훈련시키기 위한 엔드 투 엔드 암호화 된 메시지" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 @@ -26000,39 +26234,44 @@ msgstr "" "프라이버시 데이터를 활용하려면 어떻게 해야 합니까? 이 모든 분야는 최근 AI의 발전으로 상당한 이익을 얻을 수 있는 분야입니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +#, fuzzy +msgid "Federated Learning" msgstr "연합 학습" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#, fuzzy msgid "" -"Federated learning simply reverses this approach. It enables machine " +"Federated Learning simply reverses this approach. It enables machine " "learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"of moving the data to the training. Here's a one-liner explanation:" msgstr "" "연합 학습은 이 방법을 쉽게 뒤집었습니다. 데이터를 컴퓨팅 센터로 옮기는 대신 컴퓨팅 능력을 데이터가 생성되는 장소로 이동 " "시킴으로써 분산된 데이터에서 머신러닝을 실현합니다. 요약하자면:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +#, fuzzy +msgid "Centralized machine learning: move the data to the computation" msgstr "중앙 집중식 머신러닝: 데이터를 컴퓨팅 센터로 이동" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +#, fuzzy +msgid "Federated (machine) Learning: move the computation to the data" msgstr "연합(기계)학습: 컴퓨팅을 데이터로 옮김" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#, fuzzy msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. 
As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"By doing so, Federated Learning enables us to use machine learning (and " +"other data science approaches) in areas where it wasn't possible before. " +"We can now train excellent medical AI models by enabling different " +"hospitals to work together. We can solve financial fraud by training AI " +"models on the data of different financial institutions. We can build " +"novel privacy-enhancing applications (such as secure messaging) that have" +" better built-in AI than their non-privacy-enhancing alternatives. And " +"those are just a few of the examples that come to mind. As we deploy " +"Federated Learning, we discover more and more areas that can suddenly be " +"reinvented because they now have access to vast amounts of previously " +"inaccessible data." msgstr "" "이를 통해 이전에는 불가능했던 분야에서 머신러닝(및 기타 데이터 과학 방법)을 사용할 수 있습니다. 이제 다양한 병원이 협력할 수 " "있도록 함으로써 우수한 의료 AI 모델을 훈련할 수 있습니다. 다양한 금융 기관의 데이터에 대한 AI 모델을 훈련하여 금융 사기를 " @@ -26041,8 +26280,9 @@ msgstr "" "없었던 많은 데이터에 액세스할 수 있게 되었기 때문에 갑자기 재생될 수 있는 영역이 점점 더 많아지고 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#, fuzzy msgid "" -"So how does federated learning work, exactly? Let's start with an " +"So how does Federated Learning work, exactly? Let's start with an " "intuitive explanation." msgstr "그렇다면 연합 학습은 어떻게 작동합니까? 직관적인 설명부터 시작하겠습니다." @@ -26064,7 +26304,7 @@ msgstr "" "체크포인트에서 모델 매개변수를 초기화합니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|89c412136a5146ec8dc32c0973729f12|" +msgid "|1351a2629c2c46d981b13b19f9fa45f0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -26078,20 +26318,21 @@ msgid "" msgstr "1단계: 연결된 여러 조직/장치(클라이언트 노드)에 모델 전송" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#, fuzzy msgid "" "Next, we send the parameters of the global model to the connected client " "nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"organizations). This is to ensure that each participating node starts its" +" local training using the same model parameters. We often use only a few " +"of the connected nodes instead of all nodes. The reason for this is that " +"selecting more and more client nodes has diminishing returns." msgstr "" "다음으로 글로벌 모델의 파라미터를 연결된 클라이언트 노드(예: 스마트폰과 같은 에지 디바이스 또는 조직에 속한 서버)로 보냅니다. " "이것은 각 참여 노드가 동일한 모델 매개변수를 사용하여 로컬 훈련을 시작하도록 하기 위함입니다. 일반적으로 모든 노드가 아닌 몇 " "개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgid "|124c2c188b994c7ab1c862cfdb326923|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -26118,7 +26359,7 @@ msgstr "" "데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgid "|42e1951c36f2406e93c7ae0ec5b299f9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -26144,7 +26385,7 @@ msgstr "" "보냅니다. 
보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgid "|ec637b8a84234d068995ee1ccb2dd3b1|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -26168,11 +26409,12 @@ msgstr "" "모든 클라이언트 노드의 데이터에서 학습한 내용을 포함하는 모델을 하나만 갖고 싶지 않았습니까?" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#, fuzzy msgid "" "In order to get one single model, we have to combine all the model " "updates we received from the client nodes. This process is called " "*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +" way is called *Federated Averaging* (`McMahan et al., 2016 " "`__), often abbreviated as *FedAvg*. " "*FedAvg* takes the 100 model updates and, as the name suggests, averages " "them. To be more precise, it takes the *weighted average* of the model " @@ -26193,7 +26435,7 @@ msgstr "" "많은 영향을 미칩니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgid "|5bceb9d16b1a4d2db18d8a5b2f0cacb3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -26258,7 +26500,8 @@ msgstr "" " 학습 시스템에서 필수적인 부분입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" +#, fuzzy +msgid "Federated Analytics" msgstr "연합 분석" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 @@ -26312,7 +26555,7 @@ msgstr "" "사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|55472eef61274ba1b739408607e109df|" +msgid "|502b10044e864ca2b15282a393ab7faf|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -26321,6 +26564,10 @@ msgid "" " computer, roomba, and phone)" msgstr "Flower 연합 학습 서버 및 클라이언트 노드(자동차, 스쿠터, 개인용 컴퓨터, 룸바, 전화)" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final Remarks" +msgstr "" + #: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" "Congratulations, you just learned the basics of federated learning and " @@ -26764,2537 +27011,6475 @@ msgstr "" #~ "파일에는 클라이언트 앱에 필요한 모든 의존성을 나열합니다." #~ msgid "" -#~ "Note that `flwr `__" -#~ " is already installed in the " -#~ "``flwr/supernode`` base image, so you " -#~ "only need to include other package " -#~ "dependencies in your ``requirements.txt``, " -#~ "such as ``torch``, ``tensorflow``, etc." -#~ msgstr "" -#~ "`flwr `__ 는 이미 " -#~ "``flwr/supernode`` 기본 이미지에 설치되어 있으므로, " -#~ "``torch``, ``tensorflow`` 등과 같은 다른 패키지" -#~ " dependencies만 ``requirements.txt``에 포함시키면 됩니다." +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" +#~ "`flwr `__ 는 이미 " +#~ "``flwr/supernode`` 기본 이미지에 설치되어 있으므로, " +#~ "``torch``, ``tensorflow`` 등과 같은 다른 패키지" +#~ " dependencies만 ``requirements.txt``에 포함시키면 됩니다." + +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` " +#~ "예제를 사용하는 경우 ``examples/quickstart-pytorch``에" +#~ " ``Dockerfile.supernode``라는 새 파일을 생성합니다." 
+ +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습니다." + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" +#~ "처음 두 줄에서는 ``nightly`` 태그가 붙은 " +#~ "SuperNode 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로," +#~ " ``requirements.txt`` 파일을 이미지에 복사하여 " +#~ "ClientApp dependencies 요소를 설치하고 ``pip " +#~ "install``을 실행합니다. 마지막 두 줄에서 " +#~ "``client.py`` 모듈을 이미지에 복사하고 ``client:app`` " +#~ "인수를 사용하여 진입점을 ``flower-client-app``로 " +#~ "설정합니다. 인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 " +#~ "객체 참조 (``:``) 입니다." + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 빌드" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" +#~ "이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1``" +#~ " 태그를 붙였습니다. 여기서 선택한 값은 예시일 뿐이라는" +#~ " 점을 기억하세요. 필요에 따라 변경할 수 있습니다." 
+ +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 실행" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" + +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." + +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정합니다" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__를 생성하고 ``--network`` argument를 " +#~ "사용하고 SuperNodes를 실행할 Docker 네트워크의 이름을" +#~ " 전달하면 됩니다." + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "SuperNode 바이너리에 전달됩니다. SuperNode가 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 합니다." 
+ +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" +#~ "SuperNode 이미지와 마찬가지로 ServerApp Docker " +#~ "이미지는 Flower의 사전 설치된 버전과 함께 제공되며," +#~ " 자체 ServerApp 이미지를 구축하기 위한 기본 " +#~ "역할을 합니다." + +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-" +#~ "pytorch`` 예제를 사용하겠습니다. 아직 수행하지 않았다면 " +#~ "계속 진행하기 전에 `SuperNode Prerequisites`_ 을" +#~ " 따르세요." + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "ServerApp Dockerfile 만들기" + +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을" +#~ " 생성해야 합니다. ``quickstart-pytorch`` 예제를 " +#~ "사용하는 경우 ``examples/quickstart-pytorch``에 " +#~ "``Dockerfile.serverapp``이라는 새 파일을 생성합니다." + +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니다." + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. 
In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" +#~ "처음 두 줄에서는 ``1.8.0`` 태그가 붙은 " +#~ "ServerApp 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막" +#~ " 두 줄에서는 ``server.py`` 모듈을 이미지에 복사하고" +#~ " ``server:app`` argument를 사용하여 진입점을 " +#~ "``flower-server-app``로 설정합니다. 인수는 ServerApp" +#~ " 컨테이너 내에서 실행될 ServerApp의 객체 " +#~ "참조(``:``)입니다." + +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 빌드" + +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 실행" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지정합니다" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__,를 생성하고 ``--network`` argument를 " +#~ "사용하여 ServerApp을 실행할 Docker 네트워크의 이름을 " +#~ "전달하면 됩니다." + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "ServerApp 바이너리에 전달됩니다. 
ServerApp에서 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 합니다." + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" +#~ "인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` " +#~ "플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +#~ "디렉터리에 마운트할 수 있습니다. 이렇게 하면 " +#~ "ServerApp이 컨테이너 내의 인증서에 액세스할 수 " +#~ "있습니다. 컨테이너를 시작할 때 ``--root-" +#~ "certificates`` 플래그를 사용하세요." + +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" + +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_supernode `\\ \\(\\)" + +#~ msgid "d defaults to None." +#~ msgstr "d는 기본값이 None입니다." + +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." 
+ +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" + +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" + +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" + +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" + +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "" +#~ "클라이언트 앱의 특정 데이터 파티션을 로드할 때 " +#~ "사용할 수 있는 식별자입니다. 시뮬레이션을 수행할 때 " +#~ "이 식별자를 사용하는 것이 더 적절합니다." + +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" + +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." + +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "Flower SuperLink(Driver API 및 Fleet API)를 실행하세요." 
+ +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.strategy `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgstr "" + +#~ msgid "run\\_driver\\_api" +#~ msgstr "" + +#~ msgid "run\\_fleet\\_api" +#~ msgstr "" + +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" + +#~ msgid "key shares." +#~ msgstr "" + +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" + +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) 
should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" + +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" + +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "" + +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "" + +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." +#~ msgstr "" + +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" + +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "" + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "" + +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "" + +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" + +#~ msgid "receive the updated local model weights" +#~ msgstr "" + +#~ msgid "which can be implemented in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. 
The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" + +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. 
In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." +#~ msgstr "" + +#~ msgid "" +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." +#~ msgstr "" + +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." +#~ msgstr "" + +#~ msgid "Let's build a new ``Strategy`` from scratch!" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. 
We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." +#~ msgstr "" + +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" +#~ msgstr "" + +#~ msgid "" +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" +#~ msgstr "" + +#~ msgid "" +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" +#~ msgstr "" + +#~ msgid "" +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." 
+#~ msgstr "" + +#~ msgid "" +#~ "`Check out Flower Code Examples " +#~ "`__" +#~ msgstr "" + +#~ msgid "" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "Loading the data" +#~ msgstr "" + +#~ msgid "" +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." +#~ msgstr "" + +#~ msgid "" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" +#~ msgstr "" + +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. 
Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" +#~ msgstr "" + +#~ msgid "" +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." +#~ msgstr "" + +#~ msgid "Defining the model" +#~ msgstr "" + +#~ msgid "Training the model" +#~ msgstr "" + +#~ msgid "" +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" +#~ msgstr "" + +#~ msgid "" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. 
The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" +#~ msgstr "" + +#~ msgid "Updating model parameters" +#~ msgstr "" + +#~ msgid "" +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." +#~ msgstr "" + +#~ msgid "" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" +#~ msgstr "" + +#~ msgid "Implementing a Flower client" +#~ msgstr "" + +#~ msgid "" +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." 
+#~ msgstr "" + +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" +#~ msgstr "" + +#~ msgid "" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" +#~ msgstr "" + +#~ msgid "" +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" +#~ msgstr "" + +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgstr "" + +#~ msgid "Using the Virtual Client Engine" +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. 
Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" +#~ msgstr "" + +#~ msgid "Starting the training" +#~ msgstr "" + +#~ msgid "" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." 
+#~ msgstr "" + +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" + +#~ msgid "" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." 
+#~ msgstr "" + +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." +#~ msgstr "" + +#~ msgid "Let's move beyond FedAvg with Flower strategies!" +#~ msgstr "" + +#~ msgid "" +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" +#~ msgstr "" + +#~ msgid "" +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." +#~ msgstr "" + +#~ msgid "" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" +#~ msgstr "" + +#~ msgid "" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. 
Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" +#~ msgstr "" + +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" + +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "|01471150fd5144c080a176b43e92a3ff|" + +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "|9bc21c7dbd17444a8f070c60786e3484|" + +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "|3047bbce54b34099ae559963d0420d79|" + +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" + +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "|c24c1478b30e4f74839208628a842d1e|" + +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "|1b3613d7a58847b59e1d3180802dbc09|" + +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "|9980b5213db547d0b8024a50992b9e3f|" + +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" + +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "|032eb6fed6924ac387b9f13854919196|" + +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "|fbf225add7fd4df5a9bf25a95597d954|" + +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "|7efbe3d29d8349b89594e8947e910525|" + +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "|329fb3c04c744eda83bb51fa444c2266|" + +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" + +#~ msgid "run\\_client\\_app" +#~ msgstr "클라이언트앱" 
+ +#~ msgid "run\\_supernode" +#~ msgstr "run\\_supernode" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" + +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "Flower 서버앱" + +#~ msgid "run\\_superlink" +#~ msgstr "flower 초연결" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "멀티 노드 Flower 시뮬레이션" + +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" + +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. 
Using " +#~ "this argument will raise an error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." 
+#~ msgstr "" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" + +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" 
+#~ msgstr "" + +#~ msgid "" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" + +#~ msgid "Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "" + +#~ msgid "Handling the data" +#~ msgstr "" + +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" + +#~ msgid "Training and testing the model" +#~ msgstr "" + +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" + +#~ msgid "Creating the model itself" +#~ msgstr "" + +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" + +#~ msgid "Federating the example" +#~ msgstr "" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "" + +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). 
This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" + +#~ msgid "Starting the server" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" + +#~ msgid "Putting everything together" +#~ msgstr "" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" + +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." 
+#~ msgstr "" + +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. 
The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "Each client will have its own dataset." +#~ msgstr "" + +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." 
+#~ msgstr "" + +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" + +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" + +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" + +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" + +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" + +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" + +#~ msgid "" +#~ "The following examples are available as" +#~ " standalone projects. Quickstart TensorFlow/Keras" +#~ " ---------------------------" +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgstr "" + +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. 
The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." +#~ msgstr "" + +#~ msgid "" +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." 
+#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" +#~ msgstr "" + +#~ msgid ":code:`get_model_parameters()`" +#~ msgstr "" + +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_model_params()`" +#~ msgstr "" + +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_initial_params()`" +#~ msgstr "" + +#~ msgid "" +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" +#~ msgstr "" + +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." +#~ msgstr "" + +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. 
When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" +#~ msgstr "" + +#~ msgid ":code:`set_parameters` (optional)" +#~ msgstr "" + +#~ msgid "is directly imported with :code:`utils.set_model_params()`" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" + +#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgstr "" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." 
+#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" +#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" +#~ msgstr "" + +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" +#~ msgstr "" + +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." +#~ msgstr "" + +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" + +#~ msgid "" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. 
In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." +#~ msgstr "" + +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
+#~ msgstr "" + +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." +#~ msgstr "" + +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." +#~ msgstr "" + +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" +#~ msgstr "" + +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." +#~ msgstr "" + +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." 
+#~ msgstr "" + +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." +#~ msgstr "" + +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." 
+#~ msgstr "" + +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." +#~ msgstr "" + +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." +#~ msgstr "" + +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" +#~ msgstr "" + +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." +#~ msgstr "" + +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ msgstr "" + +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" +#~ msgstr "" + +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." 
+#~ msgstr "" + +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." +#~ msgstr "" + +#~ msgid "" +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." +#~ msgstr "" + +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" + +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" + +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" + +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" + +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" + +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" + +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" + +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" + +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" + +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ 
msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgstr "" + +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgstr "" + +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgstr "" + +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgstr "" + +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgstr "" + +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgstr "" + +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgstr "" + +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" + +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgstr "" + +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgstr "" + +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgstr "" + +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgstr "" + +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgstr "" + +#~ msgid "" +#~ "Include SecAgg, SecAgg+, and LightSecAgg " +#~ "protocol. The LightSecAgg protocol has " +#~ "not been implemented yet, so its " +#~ "diagram and abstraction may not be " +#~ "accurate in practice. The SecAgg " +#~ "protocol can be considered as a " +#~ "special case of the SecAgg+ protocol." +#~ msgstr "" +#~ "SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. " +#~ "LightSecAgg 프로토콜은 아직 구현되지 않았기 때문에 " +#~ "다이어그램과 추상화가 실제로는 정확하지 않을 수 있습니다." +#~ " SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로" +#~ " 간주할 수 있습니다." 
+ +#~ msgid "The ``SecAgg+`` abstraction" +#~ msgstr "The :code:`SecAgg+` 추상화" + +#~ msgid "" +#~ "In this implementation, each client will" +#~ " be assigned with a unique index " +#~ "(int) for secure aggregation, and thus" +#~ " many python dictionaries used have " +#~ "keys of int type rather than " +#~ "ClientProxy type." +#~ msgstr "" +#~ "구현에서는 각 클라이언트에 secure aggregation를 위한" +#~ " 고유 인덱스(int)가 할당되므로 사용되는 많은 파이썬 " +#~ "dictionaries에는 ClientProxy 타입이 아닌 int " +#~ "타입의 키가 있습니다." + +#~ msgid "" +#~ "The Flower server will execute and " +#~ "process received results in the " +#~ "following order:" +#~ msgstr "Flower 서버는 수신된 결과를 다음 순서로 실행하고 처리합니다:" + +#~ msgid "The ``LightSecAgg`` abstraction" +#~ msgstr "The :code:`LightSecAgg` 추상" + +#~ msgid "Types" +#~ msgstr "타입" + +#~ msgid "" +#~ "Docker Compose is `installed " +#~ "`_." +#~ msgstr "" + +#~ msgid "Run the example:" +#~ msgstr "전체 코드 예제" + +#~ msgid "Follow the logs of the SuperExec service:" +#~ msgstr "" + +#~ msgid "Only runs on AMD64." +#~ msgstr "" + +#~ msgid "" +#~ "Use the method that works best for" +#~ " you to copy the ``server`` " +#~ "directory, the certificates, and your " +#~ "Flower project to the remote machine." +#~ msgstr "" + +#~ msgid "" +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``server`` Docker Compose files." +#~ msgstr "" + +#~ msgid "" +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``client`` Docker Compose files." +#~ msgstr "" + +#~ msgid "" +#~ "The Path of the ``root-certificates``" +#~ " should be relative to the location" +#~ " of the ``pyproject.toml`` file." 
+#~ msgstr "" + +#~ msgid "To run the project, execute:" +#~ msgstr "" + +#~ msgid "Run the ``quickstart-docker`` project by executing the command:" +#~ msgstr "" + +#~ msgid "Follow the SuperExec logs to track the execution of the run:" +#~ msgstr "" + +#~ msgid "Execute the command to run the quickstart example:" +#~ msgstr "" + +#~ msgid "Monitor the SuperExec logs and wait for the summary to appear:" +#~ msgstr "" + +#~ msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#~ msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" + +#~ msgid "" +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. The only thing" +#~ " to do is modifying the file " +#~ "called ``cifar.py``, revised part is " +#~ "shown below:" +#~ msgstr "" +#~ "모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 " +#~ "연합식으로 `를 기반으로 수정합니다. :code:`cifar.py`라는" +#~ " 파일을 수정하기만 하면 되며, 수정된 부분은 아래와" +#~ " 같습니다:" + +#~ msgid "" +#~ "The model architecture defined in class" +#~ " Net() is added with Batch " +#~ "Normalization layers accordingly." +#~ msgstr "Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니다." + +#~ msgid "You can now run your machine learning workload:" +#~ msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the system consists of one " +#~ "server and two clients." +#~ msgstr "" +#~ "지금까지는 파이토치를 사용해 본 적이 있다면 상당히 " +#~ "익숙하게 보일 것입니다. 다음 단계로 넘어가서 우리가 " +#~ "구축한 것을 사용하여 FedBN 내에서 하나의 서버와 " +#~ "두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 " +#~ "보겠습니다." 
+ +#~ msgid "Federated Training" +#~ msgstr "연합 훈련" + +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only ``get_parameters`` " +#~ "and ``set_parameters`` function in " +#~ "``client.py`` needed to revise. If not," +#~ " please read the :doc:`Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`. first." +#~ msgstr "" +#~ ":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 " +#~ "`를 읽었다면, 다음 부분은 쉽게 따라할 수" +#~ " 있으며 :code:`client.py`의 :code:`get_parameters`와 " +#~ ":code:`set_parameters` 함수만 수정해야 합니다. 그렇지 " +#~ "않은 경우 :doc:`예제: 파이토치 - 중앙 집중식에서" +#~ " 연합식으로 `를 먼저 읽어보세요." + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. In FedBN, " +#~ "``server.py`` keeps unchanged, we can " +#~ "start the server directly." +#~ msgstr "" +#~ "이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 " +#~ "구성됩니다. FedBN에서 :code:`server.py`는 변경되지 않고 " +#~ "그대로 유지되므로 서버를 바로 시작할 수 있습니다." + +#~ msgid "Now, you can now open two additional terminal windows and run" +#~ msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning with FedBN strategy across two" +#~ " clients. Congratulations!" +#~ msgstr "" +#~ "를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 " +#~ "확인하세요), (이전에 중앙 집중된) PyTorch 프로젝트가 " +#~ "두 클라이언트에서 FedBN으로 연합 학습을 실행하는 것을" +#~ " 확인합니다. 축하합니다!" + +#~ msgid "Example: PyTorch - From Centralized To Federated" +#~ msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload. We are using PyTorch to" +#~ " train a Convolutional Neural Network " +#~ "on the CIFAR-10 dataset. 
First, we " +#~ "introduce this machine learning task " +#~ "with a centralized training approach " +#~ "based on the `Deep Learning with " +#~ "PyTorch " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." +#~ msgstr "" +#~ "이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 " +#~ "워크로드의 연합 버전을 구축하는 방법을 보여드립니다. 여기서는" +#~ " PyTorch를 사용해 CIFAR-10 데이터 세트에서 컨볼루션" +#~ " 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " +#~ "`_" +#~ " 튜토리얼을 기반으로 centralized 학습 접근 방식을 " +#~ "사용하여 이 머신 러닝 작업을 소개합니다. 그런 " +#~ "다음 centralized 훈련 코드를 기반으로 연합 방식" +#~ " 훈련을 실행합니다." + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized CNN training code." +#~ " If you want a more in-depth" +#~ " explanation of what's going on then" +#~ " have a look at the official " +#~ "`PyTorch tutorial " +#~ "`_." +#~ msgstr "" +#~ "중앙 집중식 CNN 트레이닝 코드에 대한 간략한 " +#~ "설명부터 시작하겠습니다. 무슨 일이 일어나고 있는지 더 " +#~ "자세히 설명하려면 공식 `PyTorch 튜토리얼 " +#~ "`_을" +#~ " 참조하세요." + +#~ msgid "" +#~ "Let's create a new file called " +#~ "``cifar.py`` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as ``torch`` and" +#~ " ``torchvision``) need to be imported. " +#~ "You can see that we do not " +#~ "import any package for federated " +#~ "learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." +#~ msgstr "" +#~ "CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한" +#~ " 모든 구성 요소가 포함된 :code:`cifar.py`라는 새" +#~ " 파일을 생성해 보겠습니다. 먼저, 필요한 모든 " +#~ "패키지(예: :code:`torch` 및 :code:`torchvision`)를 " +#~ "가져와야 합니다. 연합 학습을 위한 패키지를 가져오지 " +#~ "않는 것을 확인 할 수 있습니. 나중에 연합 " +#~ "학습 구성 요소를 추가할 때에도 이러한 모든 " +#~ "가져오기를 그대로 유지할 수 있습니다." + +#~ msgid "" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. 
The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in ``class Net()``." +#~ msgstr "" +#~ "이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10" +#~ " 데이터 세트를 사용합니다. 모델 아키텍처(매우 간단한 " +#~ "컨볼루션 신경망)는 :code:`class Net()`에 정의되어 " +#~ "있습니다." + +#~ msgid "" +#~ "The ``load_data()`` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ "``transform`` normalized the data after " +#~ "loading." +#~ msgstr "" +#~ ":code:`load_data()` 함수는 CIFAR-10 훈련 및 " +#~ "테스트 세트를 로드합니다. :code:`transform`은 로드 후" +#~ " 데이터를 정규화합니다." + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function ``train()``) which loops over" +#~ " the training set, measures the loss," +#~ " backpropagates it, and then takes " +#~ "one optimizer step for each batch " +#~ "of training examples." +#~ msgstr "" +#~ "이제 학습 집합을 반복하고, 손실을 측정하고, 이를 " +#~ "역전파한 다음 각 학습 예제 배치에 대해 하나의" +#~ " 최적화 단계를 수행하는 학습(함수 :code:`train()`)을 " +#~ "정의해야 합니다." + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function ``test()``. The" +#~ " function loops over all test samples" +#~ " and measures the loss of the " +#~ "model based on the test dataset." +#~ msgstr "" +#~ "모델 평가는 :code:`test()` 함수에 정의되어 있습니다. " +#~ "이 함수는 모든 테스트 샘플을 반복하고 테스트 " +#~ "데이터 세트에 따라 모델의 손실을 측정합니다." + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our CNN on CIFAR-10." +#~ msgstr "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 CIFAR-10에서 CNN을 훈련할 수 있습니다." + +#~ msgid "" +#~ "So far, this should all look " +#~ "fairly familiar if you've used PyTorch" +#~ " before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." +#~ msgstr "" +#~ "지금까지는 파이토치를 사용해 본 적이 있다면 상당히 " +#~ "익숙하게 보일 것입니다. 다음 단계로 넘어가서 구축한 " +#~ "것을 사용하여 하나의 서버와 두 개의 클라이언트로 " +#~ "구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." 
+ +#~ msgid "" +#~ "The simple machine learning project " +#~ "discussed in the previous section trains" +#~ " the model on a single dataset " +#~ "(CIFAR-10), we call this centralized " +#~ "learning. This concept of centralized " +#~ "learning, as shown in the previous " +#~ "section, is probably known to most " +#~ "of you, and many of you have " +#~ "used it previously. Normally, if you'd" +#~ " want to run machine learning " +#~ "workloads in a federated fashion, then" +#~ " you'd have to change most of " +#~ "your code and set everything up " +#~ "from scratch. This can be a " +#~ "considerable effort." +#~ msgstr "" +#~ "이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 " +#~ "단일 데이터 세트(CIFAR-10)로 모델을 학습시키는데, 이를 " +#~ "중앙 집중식 학습이라고 부릅니다. 이전 섹션에서 설명한 " +#~ "중앙 집중식 학습의 개념은 대부분 알고 계실 " +#~ "것이며, 많은 분들이 이전에 사용해 보셨을 것입니다. " +#~ "일반적으로 머신 러닝 워크로드를 연합 방식으로 실행하려면" +#~ " 대부분의 코드를 변경하고 모든 것을 처음부터 다시" +#~ " 설정해야 합니다. 이는 상당한 노력이 필요할 수 " +#~ "있습니다." + +#~ msgid "" +#~ "However, with Flower you can evolve " +#~ "your pre-existing code into a " +#~ "federated learning setup without the " +#~ "need for a major rewrite." +#~ msgstr "하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으로 발전시킬 수 있습니다." + +#~ msgid "" +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in ``cifar.py``" +#~ " for the *clients* that are connected" +#~ " to the *server*. The *server* sends" +#~ " model parameters to the clients. The" +#~ " *clients* run the training and " +#~ "update the parameters. The updated " +#~ "parameters are sent back to the " +#~ "*server* which averages all received " +#~ "parameter updates. This describes one " +#~ "round of the federated learning process" +#~ " and we repeat this for multiple " +#~ "rounds." +#~ msgstr "" +#~ "개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에" +#~ " 연결된 *클라이언트*에 대해 :code:`cifar.py`의 코드를 " +#~ "사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 " +#~ "전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 업데이트합니다." 
+#~ " 업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 " +#~ "수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은" +#~ " 연합 학습 프로세스의 한 라운드를 설명하며 여러 " +#~ "라운드에 걸쳐 이 과정을 반복합니다." + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ "``server.py`` first. The *server* needs " +#~ "to import the Flower package ``flwr``." +#~ " Next, we use the ``start_server`` " +#~ "function to start a server and " +#~ "tell it to perform three rounds of" +#~ " federated learning." +#~ msgstr "" +#~ "이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 " +#~ "구성됩니다. 먼저 :code:`server.py`를 설정해 보겠습니다. " +#~ "*server*는 Flower 패키지 :code:`flwr`를 가져와야 " +#~ "합니다. 다음으로, :code:`start_server` 함수를 사용하여 " +#~ "서버를 시작하고 세 차례의 연합 학습을 수행하도록 " +#~ "지시합니다." + +#~ msgid "We can already start the *server*:" +#~ msgstr "이미 *서버*를 시작할 수 있습니다:" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined centralized " +#~ "training in ``cifar.py``. Our *client* " +#~ "needs to import ``flwr``, but also " +#~ "``torch`` to update the parameters on" +#~ " our PyTorch model:" +#~ msgstr "" +#~ "마지막으로, :code:`client.py`에서 *client* 로직을 정의하고" +#~ " :code:`cifar.py`에서 이전에 정의한 중앙 집중식 " +#~ "학습을 기반으로 구축합니다. *클라이언트*는 :code:`flwr`을 " +#~ "가져와야 하며, PyTorch 모델의 파라미터를 업데이트하기 " +#~ "위해 :code:`torch`도 가져와야 합니다:" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``CifarClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. 
``CifarClient``" +#~ " needs to implement four methods, two" +#~ " methods for getting/setting model " +#~ "parameters, one method for training the" +#~ " model, and one method for testing" +#~ " the model:" +#~ msgstr "" +#~ "Flower *클라이언트*를 구현한다는 것은 기본적으로 " +#~ ":code:`flwr.client.Client` 또는 " +#~ ":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 " +#~ "의미합니다. 우리의 구현은 :code:`flwr.client.NumPyClient`를 " +#~ "기반으로 하며, 이를 :code:`CifarClient`라고 부를 " +#~ "것입니다. :code:`NumPyClient`는 파이토치나 텐서플로우/Keras처럼 " +#~ "NumPy 상호운용성이 좋은 프레임워크를 사용하는 경우 필요한" +#~ " 일부 보일러플레이트를 피하기 때문에 :code:`Client`보다 " +#~ "구현하기가 조금 더 쉽습니다. code:`CifarClient`는 모델" +#~ " 파라미터를 가져오거나 설정하는 메서드 2개, 모델 " +#~ "학습을 위한 메서드 1개, 모델 테스트를 위한 " +#~ "메서드 1개 등 네 가지 메서드를 구현해야 합니다:" + +#~ msgid "``set_parameters``" +#~ msgstr ":code:`set_parameters`" + +#~ msgid "" +#~ "set the model parameters on the " +#~ "local model that are received from " +#~ "the server" +#~ msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" + +#~ msgid "" +#~ "loop over the list of model " +#~ "parameters received as NumPy ``ndarray``'s " +#~ "(think list of neural network layers)" +#~ msgstr "(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파라미터 목록에 대해 반복합니다" + +#~ msgid "``get_parameters``" +#~ msgstr ":code:`get_parameters`" + +#~ msgid "" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ "``ndarray``'s (which is what " +#~ "``flwr.client.NumPyClient`` expects)" +#~ msgstr "" +#~ "모델 매개변수를 가져와서 NumPy :code:`ndarray`의 " +#~ "목록으로 반환합니다(이는 :code:`flwr.client.NumPyClient`가 기대하는" +#~ " 바와 같습니다)" + +#~ msgid "" +#~ "update the parameters of the local " +#~ "model with the parameters received from" +#~ " the server" +#~ msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" + +#~ msgid "train the model on the local training set" +#~ msgstr "로컬 훈련 세트에서 모델을 훈련합니다" + +#~ msgid "get the updated local model weights and return them to the server" +#~ msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" + +#~ msgid "evaluate the updated model on the local 
test set" +#~ msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" + +#~ msgid "return the local loss and accuracy to the server" +#~ msgstr "로컬 손실 및 정확도를 서버에 반환합니다" + +#~ msgid "" +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``test()`` " +#~ "previously defined in ``cifar.py``. So " +#~ "what we really do here is we " +#~ "tell Flower through our ``NumPyClient`` " +#~ "subclass which of our already defined" +#~ " functions to call for training and" +#~ " evaluation. We included type annotations" +#~ " to give you a better understanding" +#~ " of the data types that get " +#~ "passed around." +#~ msgstr "" +#~ "두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 " +#~ ":code:`evaluate`는 이전에 :code:`cifar.py`에 정의된 " +#~ "함수인 :code:`train()`과 :code:`test()`를 활용합니다. " +#~ "따라서 여기서 실제로 하는 일은 :code:`NumPyClient`" +#~ " 서브클래스를 통해 이미 정의된 함수 중 훈련과 " +#~ "평가를 위해 호출할 함수를 Flower에 알려주는 것입니다." +#~ " 전달되는 데이터 유형을 더 잘 이해할 수 " +#~ "있도록 type annotations을 포함했습니다." + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ "``CifarClient``, and starts this client. " +#~ "You load your data and model by" +#~ " using ``cifar.py``. Start ``CifarClient`` " +#~ "with the function ``fl.client.start_client()`` " +#~ "by pointing it at the same IP " +#~ "address we used in ``server.py``:" +#~ msgstr "" +#~ "이제 모델과 데이터를 모두 로드하는 함수를 정의하고, " +#~ ":code:`CifarClient`를 생성하고, 이 클라이언트를 시작하는 " +#~ "작업만 남았습니다. 코드:`cifar.py`를 사용하여 데이터와 모델을" +#~ " 로드합니다. :code:`server.py`에서 사용한 것과 동일한 " +#~ "IP 주소를 지정하여 :code:`fl.client.start_client()` " +#~ "함수로 :code:`CifarClient`를 시작합니다:" + +#~ msgid "And that's it. You can now open two additional terminal windows and run" +#~ msgstr "여기까지입니다. 
이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is running before you do " +#~ "so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning across two clients. Congratulations!" +#~ msgstr "" +#~ "를 입력하고(그 전에 서버가 실행 중인지 확인하세요) " +#~ "(이전에는 중앙 집중식) PyTorch 프로젝트가 두 " +#~ "클라이언트에서 연합 학습을 실행하는 것을 확인합니다. " +#~ "축하합니다!" + +#~ msgid "" +#~ "The full source code for this " +#~ "example: `PyTorch: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is, of course, somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using different subsets of CIFAR-10 " +#~ "on each client? How about adding " +#~ "more clients?" +#~ msgstr "" +#~ "이 예제의 전체 소스 코드: `파이토치: 중앙 " +#~ "Centralized에서 Federated으로 (코드) " +#~ "`_. 물론 이 " +#~ "예제는 두 클라이언트가 완전히 동일한 데이터 세트를 " +#~ "로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 " +#~ "현실적이지 않습니다. 이제 이 주제를 더 자세히 " +#~ "살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 " +#~ "CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요?" +#~ " 클라이언트를 더 추가하는 것은 어떨까요?" + +#~ msgid "" +#~ "To help you start and manage all" +#~ " of the concurrently executing training " +#~ "runs, Flower offers one additional " +#~ "long-running server-side service called " +#~ "**SuperExec**. When you type ``flwr " +#~ "run`` to start a new training run," +#~ " the ``flwr`` CLI bundles your local" +#~ " project (mainly your ``ServerApp`` and " +#~ "``ClientApp``) and sends it to the " +#~ "**SuperExec**. The **SuperExec** will then " +#~ "take care of starting and managing " +#~ "your ``ServerApp``, which in turn " +#~ "selects SuperNodes to execute your " +#~ "``ClientApp``." 
+#~ msgstr "" + +#~ msgid "" +#~ "This architecture allows many users to" +#~ " (concurrently) run their projects on " +#~ "the same federation, simply by typing" +#~ " ``flwr run`` on their local " +#~ "developer machine." +#~ msgstr "" + +#~ msgid "Flower Deployment Engine with SuperExec" +#~ msgstr "" + +#~ msgid "The SuperExec service for managing concurrent training runs in Flower." +#~ msgstr "" + +#~ msgid "FED Template" +#~ msgstr "FED 템플릿" + +#~ msgid "Table of Contents" +#~ msgstr "목차" + +#~ msgid "[Table of Contents](#table-of-contents)" +#~ msgstr "[목차](#목차)" + +#~ msgid "[Summary](#summary)" +#~ msgstr "[요약](#요약)" + +#~ msgid "[Motivation](#motivation)" +#~ msgstr "[동기](#동기)" + +#~ msgid "[Goals](#goals)" +#~ msgstr "[목표](#목표)" + +#~ msgid "[Non-Goals](#non-goals)" +#~ msgstr "[비목표](#비목표)" + +#~ msgid "[Proposal](#proposal)" +#~ msgstr "[제안](#제안)" + +#~ msgid "[Drawbacks](#drawbacks)" +#~ msgstr "[단점](#단점)" + +#~ msgid "[Alternatives Considered](#alternatives-considered)" +#~ msgstr "[고려되는 대안](#고려되는 대안)" + +#~ msgid "[Appendix](#appendix)" +#~ msgstr "[부록](#부록)" + +#~ msgid "Summary" +#~ msgstr "요약" + +#~ msgid "\\[TODO - sentence 1: summary of the problem\\]" +#~ msgstr "\\[TODO - 문장 1: 문제 요약\\]" + +#~ msgid "\\[TODO - sentence 2: summary of the solution\\]" +#~ msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" + +#~ msgid "Motivation" +#~ msgstr "동기" + +#~ msgid "\\[TODO\\]" +#~ msgstr "\\[TODO\\]" + +#~ msgid "Goals" +#~ msgstr "목표" + +#~ msgid "Non-Goals" +#~ msgstr "목표가 아닌 것" + +#~ msgid "Proposal" +#~ msgstr "제안" + +#~ msgid "Drawbacks" +#~ msgstr "단점" + +#~ msgid "Alternatives Considered" +#~ msgstr "고려되는 대안" + +#~ msgid "\\[Alternative 1\\]" +#~ msgstr "\\[대안 1\\]" + +#~ msgid "\\[Alternative 2\\]" +#~ msgstr "\\[대안 2\\]" + +#~ msgid "Flower Enhancement Doc" +#~ msgstr "Flower Enhancement Doc" + +#~ msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#~ msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" + +#~ msgid "[Metadata](#metadata)" +#~ 
msgstr "[Metadata](#metadata)" + +#~ msgid "[Workflow](#workflow)" +#~ msgstr "[워크플로우](#워크플로우)" + +#~ msgid "[GitHub Issues](#github-issues)" +#~ msgstr "[GitHub Issues](#github-issues)" + +#~ msgid "[Google Docs](#google-docs)" +#~ msgstr "[Google Docs](#google-docs)" + +#~ msgid "A Flower Enhancement is a standardized development process to" +#~ msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" + +#~ msgid "provide a common structure for proposing larger changes" +#~ msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" + +#~ msgid "ensure that the motivation for a change is clear" +#~ msgstr "변화의 동기가 분명한지 확인합니다" + +#~ msgid "persist project information in a version control system" +#~ msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" + +#~ msgid "document the motivation for impactful user-facing changes" +#~ msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" + +#~ msgid "reserve GitHub issues for tracking work in flight" +#~ msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" + +#~ msgid "" +#~ "ensure community participants can successfully" +#~ " drive changes to completion across " +#~ "one or more releases while stakeholders" +#~ " are adequately represented throughout the" +#~ " process" +#~ msgstr "" +#~ "커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 " +#~ "성공적으로 완료할 수 있도록 하는 동시에 이해 " +#~ "관계자가 프로세스 전반에 걸쳐 적절히 대표되도록 보장합니다" + +#~ msgid "Hence, an Enhancement Doc combines aspects of" +#~ msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" + +#~ msgid "a feature, and effort-tracking document" +#~ msgstr "기능 및 effort-tracking 문서" + +#~ msgid "a product requirements document" +#~ msgstr "제품 요구 사항 문서" + +#~ msgid "a design document" +#~ msgstr "디자인 문서" + +#~ msgid "" +#~ "into one file, which is created " +#~ "incrementally in collaboration with the " +#~ "community." +#~ msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." 
+ +#~ msgid "" +#~ "For far-fetching changes or features " +#~ "proposed to Flower, an abstraction " +#~ "beyond a single GitHub issue or " +#~ "pull request is required to understand" +#~ " and communicate upcoming changes to " +#~ "the project." +#~ msgstr "" +#~ "Flower에 제안된 변경 사항이나 기능을 멀리 가져오는" +#~ " 경우, 프로젝트의 향후 변경 사항을 이해하고 전달하기" +#~ " 위해 단일 GitHub 이슈 또는 pull " +#~ "request를 넘어서는 abstraction이 필요합니다." + +#~ msgid "" +#~ "The purpose of this process is to" +#~ " reduce the amount of \"tribal " +#~ "knowledge\" in our community. By moving" +#~ " decisions from Slack threads, video " +#~ "calls, and hallway conversations into a" +#~ " well-tracked artifact, this process " +#~ "aims to enhance communication and " +#~ "discoverability." +#~ msgstr "" +#~ "이 프로세스의 목적은 커뮤니티 내 '부족한 지식'의 " +#~ "양을 줄이는 것입니다. 이 프로세스는 Slack 스레드," +#~ " 영상 통화, 복도 대화에서 나온 의사 결정을 " +#~ "잘 추적된 아티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을" +#~ " 향상시키는 것을 목표로 합니다." + +#~ msgid "" +#~ "Roughly any larger, user-facing " +#~ "enhancement should follow the Enhancement " +#~ "process. If an enhancement would be " +#~ "described in either written or verbal" +#~ " communication to anyone besides the " +#~ "author or developer, then consider " +#~ "creating an Enhancement Doc." +#~ msgstr "" +#~ "대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 " +#~ "개선 프로세스를 따라야 합니다. 개선 사항을 작성자나 " +#~ "개발자 이외의 다른 사람에게 서면 또는 구두로 " +#~ "설명해야 하는 경우에는 개선 문서 작성을 고려하세요." #~ msgid "" -#~ "Next, we create a Dockerfile. If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.supernode`` in ``examples/quickstart-" -#~ "pytorch``." -#~ msgstr "" -#~ "다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` " -#~ "예제를 사용하는 경우 ``examples/quickstart-pytorch``에" -#~ " ``Dockerfile.supernode``라는 새 파일을 생성합니다." +#~ "Similarly, any technical effort (refactoring," +#~ " major architectural change) that will " +#~ "impact a large section of the " +#~ "development community should also be " +#~ "communicated widely. 
The Enhancement process" +#~ " is suited for this even if it" +#~ " will have zero impact on the " +#~ "typical user or operator." +#~ msgstr "" +#~ "마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 " +#~ "기술적 노력(리팩토링, 주요 아키텍처 변경)도 널리 알려야" +#~ " 합니다. 개선 프로세스는 일반 사용자나 운영자에게 전혀" +#~ " 영향을 미치지 않더라도 이를 위해 적합합니다." #~ msgid "" -#~ "The ``Dockerfile.supernode`` contains the " -#~ "instructions that assemble the SuperNode " -#~ "image." -#~ msgstr "``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습니다." +#~ "For small changes and additions, going" +#~ " through the Enhancement process would " +#~ "be time-consuming and unnecessary. This" +#~ " includes, for example, adding new " +#~ "Federated Learning algorithms, as these " +#~ "only add features without changing how" +#~ " Flower works or is used." +#~ msgstr "" +#~ "작은 변경 및 추가의 경우, 개선 프로세스를 거치는" +#~ " 것은 시간이 많이 걸리고 불필요합니다. 예를 들어," +#~ " 새로운 연합 학습 알고리즘을 추가하는 것은 " +#~ "Flower의 작동 방식이나 사용 방식을 변경하지 않고 " +#~ "기능만 추가하는 것이기 때문입니다." #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the SuperNode " -#~ "image tagged ``nightly`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. Next, we install the" -#~ " ClientApp dependencies by copying the " -#~ "``requirements.txt`` file into the image " -#~ "and run ``pip install``. In the " -#~ "last two lines, we copy the " -#~ "``client.py`` module into the image and" -#~ " set the entry point to ``flower-" -#~ "client-app`` with the argument " -#~ "``client:app``. The argument is the " -#~ "object reference of the ClientApp " -#~ "(``:``) that will be run" -#~ " inside the ClientApp." +#~ "Enhancements are different from feature " +#~ "requests, as they are already providing" +#~ " a laid-out path for implementation" +#~ " and are championed by members of " +#~ "the community." +#~ msgstr "기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지하는 것이므로 기능 요청과는 다릅니다." 
+ +#~ msgid "" +#~ "An Enhancement is captured in a " +#~ "Markdown file that follows a defined " +#~ "template and a workflow to review " +#~ "and store enhancement docs for reference" +#~ " — the Enhancement Doc." #~ msgstr "" -#~ "처음 두 줄에서는 ``nightly`` 태그가 붙은 " -#~ "SuperNode 이미지를 기본 이미지로 사용하고 작업 " -#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" -#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로," -#~ " ``requirements.txt`` 파일을 이미지에 복사하여 " -#~ "ClientApp dependencies 요소를 설치하고 ``pip " -#~ "install``을 실행합니다. 마지막 두 줄에서 " -#~ "``client.py`` 모듈을 이미지에 복사하고 ``client:app`` " -#~ "인수를 사용하여 진입점을 ``flower-client-app``로 " -#~ "설정합니다. 인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 " -#~ "객체 참조 (``:``) 입니다." +#~ "개선 사항은 정의된 템플릿과 참조용으로 Enhancement " +#~ "Doc.를 검토하고 저장하는 워크플로우를 따르는 Markdown " +#~ "파일에 캡처됩니다." -#~ msgid "Building the SuperNode Docker image" -#~ msgstr "SuperNode Docker 이미지 빌드" +#~ msgid "Enhancement Doc Template" +#~ msgstr "Enhancement Doc 템플릿" #~ msgid "" -#~ "We gave the image the name " -#~ "``flwr_supernode``, and the tag ``0.0.1``. " -#~ "Remember that the here chosen values " -#~ "only serve as an example. You can" -#~ " change them to your needs." -#~ msgstr "" -#~ "이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1``" -#~ " 태그를 붙였습니다. 여기서 선택한 값은 예시일 뿐이라는" -#~ " 점을 기억하세요. 필요에 따라 변경할 수 있습니다." +#~ "Each enhancement doc is provided as " +#~ "a Markdown file having the following " +#~ "structure" +#~ msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" -#~ msgid "Running the SuperNode Docker image" -#~ msgstr "SuperNode Docker 이미지 실행" +#~ msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#~ msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" -#~ msgid "Now that we have built the SuperNode image, we can finally run it." -#~ msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." 
+#~ msgid "Title (same as in metadata)" +#~ msgstr "Title (metadata와 같게)" -#~ msgid "Let's break down each part of this command:" -#~ msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" +#~ msgid "Table of Contents (if needed)" +#~ msgstr "Table of Contents (필요시)" + +#~ msgid "Notes/Constraints/Caveats (optional)" +#~ msgstr "Notes/Constraints/Caveats (선택 사항)" + +#~ msgid "Design Details (optional)" +#~ msgstr "Design Details (선택 사항)" + +#~ msgid "Graduation Criteria" +#~ msgstr "졸업 기준" + +#~ msgid "Upgrade/Downgrade Strategy (if applicable)" +#~ msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" + +#~ msgid "As a reference, this document follows the above structure." +#~ msgstr "참고로 이 문서는 위의 구조를 따릅니다." #~ msgid "" -#~ "``--rm``: This option specifies that the" -#~ " container should be automatically removed" -#~ " when it stops." -#~ msgstr "``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." +#~ "**fed-number** (Required) The `fed-" +#~ "number` of the last Flower Enhancement" +#~ " Doc + 1. With this number, it" +#~ " becomes easy to reference other " +#~ "proposals." +#~ msgstr "" +#~ "**피드 번호** (필수) 마지막 Flower Enhancement" +#~ " 문서의 `피드 번호` + 1. 이 번호를 " +#~ "사용하면 다른 제안을 쉽게 참조할 수 있습니다." -#~ msgid "``--insecure``: This option enables insecure communication." -#~ msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." +#~ msgid "**title** (Required) The title of the proposal in plain language." +#~ msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." #~ msgid "" -#~ "``--superlink 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" -#~ msgstr "``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정합니다" +#~ "**status** (Required) The current status " +#~ "of the proposal. See [workflow](#workflow) " +#~ "for the possible states." +#~ msgstr "**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)를 참조하세요." -#~ msgid "API to connect to. Remember to update it with your SuperLink IP." -#~ msgstr "API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." 
+#~ msgid "" +#~ "**authors** (Required) A list of authors" +#~ " of the proposal. This is simply " +#~ "the GitHub ID." +#~ msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your SuperNodes." -#~ msgstr "" -#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " -#~ "network `__를 생성하고 ``--network`` argument를 " -#~ "사용하고 SuperNodes를 실행할 Docker 네트워크의 이름을" -#~ " 전달하면 됩니다." +#~ "**creation-date** (Required) The date " +#~ "that the proposal was first submitted" +#~ " in a PR." +#~ msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." #~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "SuperNode binary. To see all available" -#~ " flags that the SuperNode supports, " -#~ "run:" -#~ msgstr "" -#~ "태그 뒤에 오는 모든 argument는 Flower " -#~ "SuperNode 바이너리에 전달됩니다. SuperNode가 지원하는 " -#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" +#~ "**last-updated** (Optional) The date " +#~ "that the proposal was last changed " +#~ "significantly." +#~ msgstr "**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." #~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your SuperNode container." -#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 합니다." +#~ "**see-also** (Optional) A list of " +#~ "other proposals that are relevant to " +#~ "this one." +#~ msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." + +#~ msgid "**replaces** (Optional) A list of proposals that this one replaces." +#~ msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." #~ msgid "" -#~ "Similar to the SuperNode image, the " -#~ "ServerApp Docker image comes with a " -#~ "pre-installed version of Flower and " -#~ "serves as a base for building your" -#~ " own ServerApp image." 
-#~ msgstr "" -#~ "SuperNode 이미지와 마찬가지로 ServerApp Docker " -#~ "이미지는 Flower의 사전 설치된 버전과 함께 제공되며," -#~ " 자체 ServerApp 이미지를 구축하기 위한 기본 " -#~ "역할을 합니다." +#~ "**superseded-by** (Optional) A list of" +#~ " proposals that this one supersedes." +#~ msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." + +#~ msgid "Workflow" +#~ msgstr "워크플로우" #~ msgid "" -#~ "We will use the same ``quickstart-" -#~ "pytorch`` example as we do in the" -#~ " Flower SuperNode section. If you " -#~ "have not already done so, please " -#~ "follow the `SuperNode Prerequisites`_ before" -#~ " proceeding." -#~ msgstr "" -#~ "여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-" -#~ "pytorch`` 예제를 사용하겠습니다. 아직 수행하지 않았다면 " -#~ "계속 진행하기 전에 `SuperNode Prerequisites`_ 을" -#~ " 따르세요." +#~ "The idea forming the enhancement should" +#~ " already have been discussed or " +#~ "pitched in the community. As such, " +#~ "it needs a champion, usually the " +#~ "author, who shepherds the enhancement. " +#~ "This person also has to find " +#~ "committers to Flower willing to review" +#~ " the proposal." +#~ msgstr "" +#~ "개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나" +#~ " 제안된 적이 있어야 합니다. 따라서 개선 사항을 " +#~ "주도하는 사람(보통 작성자)이 필요합니다. 이 사람은 또한" +#~ " 제안을 검토할 의향이 있는 Flower 커미터를 찾아야" +#~ " 합니다." -#~ msgid "Creating a ServerApp Dockerfile" -#~ msgstr "ServerApp Dockerfile 만들기" +#~ msgid "" +#~ "New enhancements are checked in with " +#~ "a file name in the form of " +#~ "`NNNN-YYYYMMDD-enhancement-title.md`, with " +#~ "`NNNN` being the Flower Enhancement Doc" +#~ " number, to `enhancements`. All " +#~ "enhancements start in `provisional` state " +#~ "as part of a pull request. " +#~ "Discussions are done as part of " +#~ "the pull request review." +#~ msgstr "" +#~ "새 개선 사항은 `NNNN-YYYYMMDD-enhancement-" +#~ "title.md` 형식의 파일 이름으로 체크인되며, `NNNN`은 " +#~ "Flower 개선 문서 번호이고 `enhancements`에 해당합니다." +#~ " 모든 개선 사항은 pull request의 일부로 " +#~ "`잠정` 상태에서 시작됩니다. 토론은 pull request " +#~ "검토의 일부로 이루어집니다." 
#~ msgid "" -#~ "First, we need to create a " -#~ "Dockerfile in the directory where the" -#~ " ``ServerApp`` code is located. If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" -#~ "pytorch``." -#~ msgstr "" -#~ "먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을" -#~ " 생성해야 합니다. ``quickstart-pytorch`` 예제를 " -#~ "사용하는 경우 ``examples/quickstart-pytorch``에 " -#~ "``Dockerfile.serverapp``이라는 새 파일을 생성합니다." +#~ "Once an enhancement has been reviewed" +#~ " and approved, its status is changed" +#~ " to `implementable`. The actual " +#~ "implementation is then done in separate" +#~ " pull requests. These pull requests " +#~ "should mention the respective enhancement " +#~ "as part of their description. After " +#~ "the implementation is done, the proposal" +#~ " status is changed to `implemented`." +#~ msgstr "" +#~ "개선 사항이 검토 및 승인되면 상태가 '구현 " +#~ "가능'으로 변경됩니다. 그런 다음 실제 구현은 별도의 " +#~ "pull requests를 통해 이루어집니다. 이러한 pull " +#~ "requests는 설명의 일부로 해당 개선 사항을 언급해야" +#~ " 합니다. 구현이 완료되면 제안 상태는 '구현됨'으로 " +#~ "변경됩니다." #~ msgid "" -#~ "The ``Dockerfile.serverapp`` contains the " -#~ "instructions that assemble the ServerApp " -#~ "image." -#~ msgstr "``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니다." +#~ "Under certain conditions, other states " +#~ "are possible. An Enhancement has the " +#~ "following states:" +#~ msgstr "특정 조건에서는 다른 상태도 가능합니다. 개선에는 다음과 같은 상태가 있습니다:" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the ServerApp " -#~ "image tagged ``1.8.0`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. In the last two " -#~ "lines, we copy the ``server.py`` module" -#~ " into the image and set the " -#~ "entry point to ``flower-server-app`` " -#~ "with the argument ``server:app``. 
The " -#~ "argument is the object reference of " -#~ "the ServerApp (``:``) that " -#~ "will be run inside the ServerApp " -#~ "container." +#~ "`provisional`: The enhancement has been " +#~ "proposed and is actively being defined." +#~ " This is the starting state while " +#~ "the proposal is being fleshed out " +#~ "and actively defined and discussed." #~ msgstr "" -#~ "처음 두 줄에서는 ``1.8.0`` 태그가 붙은 " -#~ "ServerApp 이미지를 기본 이미지로 사용하고 작업 " -#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" -#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막" -#~ " 두 줄에서는 ``server.py`` 모듈을 이미지에 복사하고" -#~ " ``server:app`` argument를 사용하여 진입점을 " -#~ "``flower-server-app``로 설정합니다. 인수는 ServerApp" -#~ " 컨테이너 내에서 실행될 ServerApp의 객체 " -#~ "참조(``:``)입니다." +#~ "'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다." +#~ " 제안이 구체화되고 활발하게 정의 및 논의되는 동안의" +#~ " 시작 단계입니다." -#~ msgid "Building the ServerApp Docker image" -#~ msgstr "ServerApp Docker 이미지 빌드" +#~ msgid "`implementable`: The enhancement has been reviewed and approved." +#~ msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." -#~ msgid "Running the ServerApp Docker image" -#~ msgstr "ServerApp Docker 이미지 실행" +#~ msgid "" +#~ "`implemented`: The enhancement has been " +#~ "implemented and is no longer actively" +#~ " changed." +#~ msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." -#~ msgid "Now that we have built the ServerApp image, we can finally run it." -#~ msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." +#~ msgid "" +#~ "`deferred`: The enhancement is proposed " +#~ "but not actively being worked on." +#~ msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." #~ msgid "" -#~ "``--superlink 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" -#~ msgstr "``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지정합니다" +#~ "`rejected`: The authors and reviewers " +#~ "have decided that this enhancement is" +#~ " not moving forward." +#~ msgstr "`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니다." 
+ +#~ msgid "`withdrawn`: The authors have withdrawn the enhancement." +#~ msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." + +#~ msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#~ msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your ServerApps." +#~ "Adding an additional process to the " +#~ "ones already provided by GitHub (Issues" +#~ " and Pull Requests) adds more " +#~ "complexity and can be a barrier " +#~ "for potential first-time contributors." #~ msgstr "" -#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " -#~ "network `__,를 생성하고 ``--network`` argument를 " -#~ "사용하여 ServerApp을 실행할 Docker 네트워크의 이름을 " -#~ "전달하면 됩니다." +#~ "GitHub에서 이미 제공하는 프로세스(이슈 및 Pull " +#~ "Requests)에 추가 프로세스를 추가하면 더 복잡해지고 " +#~ "잠재적인 처음인 기여자에게는 장벽이 될 수 있습니다." #~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "ServerApp binary. To see all available" -#~ " flags that the ServerApp supports, " -#~ "run:" +#~ "Expanding the proposal template beyond " +#~ "the single-sentence description currently " +#~ "required in the features issue template" +#~ " may be a heavy burden for " +#~ "non-native English speakers." #~ msgstr "" -#~ "태그 뒤에 오는 모든 argument는 Flower " -#~ "ServerApp 바이너리에 전달됩니다. ServerApp에서 지원하는 " -#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" +#~ "현재 기능 이슈 템플릿에서 요구되는 한 문장 설명" +#~ " 이상으로 제안서 템플릿을 확장하는 것은 영어가 모국어가" +#~ " 아닌 사용자에게는 큰 부담이 될 수 있습니다." -#~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your ServerApp container." -#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 합니다." 
+#~ msgid "GitHub Issues" +#~ msgstr "GitHub 이슈" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--root-certificates`` " -#~ "flags when starting the container." -#~ msgstr "" -#~ "인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` " -#~ "플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " -#~ "디렉터리에 마운트할 수 있습니다. 이렇게 하면 " -#~ "ServerApp이 컨테이너 내의 인증서에 액세스할 수 " -#~ "있습니다. 컨테이너를 시작할 때 ``--root-" -#~ "certificates`` 플래그를 사용하세요." - -#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" -#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#~ "Using GitHub Issues for these kinds " +#~ "of enhancements is doable. One could " +#~ "use, for example, tags, to differentiate" +#~ " and filter them from other issues." +#~ " The main issue is in discussing " +#~ "and reviewing an enhancement: GitHub " +#~ "issues only have a single thread " +#~ "for comments. Enhancements usually have " +#~ "multiple threads of discussion at the" +#~ " same time for various parts of " +#~ "the doc. Managing these multiple " +#~ "discussions can be confusing when using" +#~ " GitHub Issues." +#~ msgstr "" +#~ "이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 " +#~ "가능합니다. 예를 들어 태그를 사용하여 다른 이슈와 " +#~ "구별하고 필터링할 수 있습니다. 주요 이슈는 개선 " +#~ "사항에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는" +#~ " 댓글 스레드가 하나만 있습니다. 개선 사항에는 " +#~ "일반적으로 문서의 여러 부분에 대해 동시에 여러 " +#~ "개의 토론 스레드가 있습니다. GitHub 이슈를 사용할" +#~ " 때 이러한 여러 토론을 관리하면 혼란스러울 수 " +#~ "있습니다." + +#~ msgid "Google Docs" +#~ msgstr "Google 문서 도구" -#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" -#~ msgstr ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgid "" +#~ "Google Docs allow for multiple threads" +#~ " of discussions. But as Google Docs" +#~ " are hosted outside the project, " +#~ "their discoverability by the community " +#~ "needs to be taken care of. 
A " +#~ "list of links to all proposals has" +#~ " to be managed and made available " +#~ "for the community. Compared to shipping" +#~ " proposals as part of Flower's " +#~ "repository, the potential for missing " +#~ "links is much higher." +#~ msgstr "" +#~ "Google 문서는 여러 스레드의 토론을 허용합니다. 하지만" +#~ " Google 문서는 프로젝트 외부에서 호스팅되므로 커뮤니티에서" +#~ " 검색할 수 있도록 관리해야 합니다. 모든 제안에 " +#~ "대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. " +#~ "Flower 저장소의 일부로 제안서를 보낼 때와 비교하면" +#~ " 링크가 누락될 가능성이 훨씬 더 높습니다." + +#~ msgid "FED - Flower Enhancement Doc" +#~ msgstr "FED - Flower 개선 문서" -#~ msgid "d defaults to None." -#~ msgstr "d는 기본값이 None입니다." +#~ msgid "" +#~ "Along with model parameters, Flower can" +#~ " send configuration values to clients. " +#~ "Configuration values can be used for " +#~ "various purposes. They are, for example," +#~ " a popular way to control client-" +#~ "side hyperparameters from the server." +#~ msgstr "" +#~ "모델 파라미터와 함께 Flower는 설정 값을 클라이언트에" +#~ " 전송할 수 있습니다. 구성 값은 다양한 용도로 " +#~ "사용할 수 있습니다. 예를 들어 서버에서 클라이언트 " +#~ "측 하이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다." -#~ msgid "Update R from dict/iterable E and F." -#~ msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." +#~ msgid "" +#~ "Configuration values are represented as " +#~ "a dictionary with ``str`` keys and " +#~ "values of type ``bool``, ``bytes``, " +#~ "``double`` (64-bit precision float), ``int``," +#~ " or ``str`` (or equivalent types in" +#~ " different languages). Here is an " +#~ "example of a configuration dictionary in" +#~ " Python:" +#~ msgstr "" +#~ "구성 값은 ``str`` 키와 ``bool``, ``bytes``," +#~ " ``double``(64비트 정밀도 실수), ``int`` 또는 " +#~ "``str``(또는 다른 언어의 동등한 유형) 유형의 값으로" +#~ " 구성된 사전으로 표현됩니다. 
다음은 Python의 구성 " +#~ "사전 예제입니다:" #~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_ENTER " -#~ "`\\" -#~ msgstr "" -#~ ":py:obj:`RUN_DRIVER_API_ENTER " -#~ "`\\" +#~ "One can, for example, convert a " +#~ "list of floating-point numbers to " +#~ "a JSON string, then send the JSON" +#~ " string using the configuration dictionary," +#~ " and then convert the JSON string " +#~ "back to a list of floating-point" +#~ " numbers on the client." +#~ msgstr "" +#~ "예를 들어 부동 소수점 숫자 목록을 JSON " +#~ "문자열로 변환한 다음 구성 dictionary을 사용하여 " +#~ "JSON 문자열을 전송한 다음 클라이언트에서 다시 부동 " +#~ "소수점 숫자 목록으로 변환할 수 있습니다." #~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_LEAVE " -#~ "`\\" -#~ msgstr "" -#~ ":py:obj:`RUN_DRIVER_API_LEAVE " -#~ "`\\" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like ``FedAvg``. " +#~ "Built-in strategies support so-called " +#~ "configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." +#~ msgstr "" +#~ "클라이언트에 구성 값을 보내는 가장 쉬운 방법은 " +#~ ":code:`FedAvg`와 같은 기본 제공 전략을 사용하는 " +#~ "것입니다. 기본 제공 전략은 소위 구성 함수를 " +#~ "지원합니다. 구성 함수는 내장 전략이 현재 단계의 " +#~ "구성 사전을 가져오기 위해 호출하는 함수입니다. 그런 " +#~ "다음 해당 단계 동안 선택된 모든 클라이언트에 구성" +#~ " 사전을 전달합니다." 
#~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_ENTER " -#~ "`\\" +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter ``on_fit_config_fn``:" #~ msgstr "" -#~ ":py:obj:`RUN_FLEET_API_ENTER " -#~ "`\\" +#~ "기본 제공 전략이 이 함수를 사용하도록 하려면 " +#~ "초기화 중에 매개 변수 :code:`on_fit_config_fn`을 " +#~ "사용하여 ``FedAvg``에 이 함수를 전달하면 됩니다:" #~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_LEAVE " -#~ "`\\" -#~ msgstr "" -#~ ":py:obj:`RUN_FLEET_API_LEAVE " -#~ "`\\" +#~ "One the client side, we receive " +#~ "the configuration dictionary in ``fit``:" +#~ msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" -#~ msgid ":py:obj:`DRIVER_CONNECT `\\" -#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" +#~ msgid "" +#~ "There is also an `on_evaluate_config_fn` " +#~ "to configure evaluation, which works the" +#~ " same way. They are separate " +#~ "functions because one might want to " +#~ "send different configuration values to " +#~ "`evaluate` (for example, to use a " +#~ "different batch size)." +#~ msgstr "" +#~ "평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 " +#~ "방식으로 작동합니다. 다른 배치 크기를 사용하기 위해 " +#~ "다른 구성 값을 `evaluate`로 보내려고 할 수 " +#~ "있기 때문에 이 함수는 별도의 함수입니다." -#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" -#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgid "" +#~ "The built-in strategies call this " +#~ "function every round (that is, every " +#~ "time `Strategy.configure_fit` or " +#~ "`Strategy.configure_evaluate` runs). Calling " +#~ "`on_evaluate_config_fn` every round allows us" +#~ " to vary/change the config dict over" +#~ " consecutive rounds. If we wanted to" +#~ " implement a hyperparameter schedule, for" +#~ " example, to increase the number of" +#~ " local epochs during later rounds, we" +#~ " could do the following:" +#~ msgstr "" +#~ "기본 제공 전략은 매 라운드마다 이 함수를 " +#~ "호출합니다(즉, `Strategy.configure_fit` 또는 " +#~ "`Strategy.configure_evaluate`가 실행될 때마다). 
매 " +#~ "라운드마다 `on_evaluate_config_fn`을 호출하면 연속된 라운드에서" +#~ " config dict를 변경/변경할 수 있습니다. 예를 " +#~ "들어 이후 라운드에서 로컬 에포크 수를 늘리기 위해" +#~ " 하이퍼파라미터 일정을 구현하려면 다음과 같이 할 수" +#~ " 있습니다:" + +#~ msgid "The ``FedAvg`` strategy will call this function *every round*." +#~ msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다." + +#~ msgid "Configuring individual clients" +#~ msgstr "개별 클라이언트 구성" #~ msgid "" -#~ ":py:obj:`START_DRIVER_ENTER " -#~ "`\\" -#~ msgstr "" -#~ ":py:obj:`START_DRIVER_ENTER " -#~ "`\\" +#~ "In some cases, it is necessary to" +#~ " send different configuration values to " +#~ "different clients." +#~ msgstr "경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다." #~ msgid "" -#~ ":py:obj:`START_DRIVER_LEAVE " -#~ "`\\" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. " +#~ "Here's a nonsensical example that " +#~ "customizes ``FedAvg`` by adding a custom" +#~ " ``\"hello\": \"world\"`` configuration key/value" +#~ " pair to the config dict of a" +#~ " *single client* (only the first " +#~ "client in the list, the other " +#~ "clients in this round to not " +#~ "receive this \"special\" config value):" +#~ msgstr "" +#~ "이는 기존 전략을 사용자 지정하거나 :doc:`implementing" +#~ " a custom strategy from scratch " +#~ "`를 통해 수행할 " +#~ "수 있습니다. 다음은 사용자 지정 ``\"hello\"'를 " +#~ "추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예입니다:" +#~ " \"world\"`` 구성 키/값 쌍을 *단일 클라이언트*의" +#~ " config dict에 추가합니다(목록의 첫 번째 클라이언트만," +#~ " 이 라운드의 다른 클라이언트는 이 \"특별한\" 구성" +#~ " 값을 수신하지 않음):" + +#~ msgid "Configure logging" +#~ msgstr "로깅 구성" + +#~ msgid "" +#~ "The Flower logger keeps track of " +#~ "all core events that take place in" +#~ " federated learning workloads. It presents" +#~ " information by default following a " +#~ "standard message format:" #~ msgstr "" -#~ ":py:obj:`START_DRIVER_LEAVE " -#~ "`\\" +#~ "Flower 로거는 federated 학습 워크로드에서 발생하는 " +#~ "모든 핵심 이벤트를 추적합니다. 
기본적으로 표준 메시지 " +#~ "형식에 따라 정보를 표시합니다:" #~ msgid "" -#~ "An identifier that can be used " -#~ "when loading a particular data partition" -#~ " for a ClientApp. Making use of " -#~ "this identifier is more relevant when" -#~ " conducting simulations." +#~ "containing relevant information including: log" +#~ " message level (e.g. ``INFO``, ``DEBUG``)," +#~ " a timestamp, the line where the " +#~ "logging took place from, as well " +#~ "as the log message itself. In this" +#~ " way, the logger would typically " +#~ "display information on your terminal as" +#~ " follows:" #~ msgstr "" -#~ "클라이언트 앱의 특정 데이터 파티션을 로드할 때 " -#~ "사용할 수 있는 식별자입니다. 시뮬레이션을 수행할 때 " -#~ "이 식별자를 사용하는 것이 더 적절합니다." +#~ "로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), " +#~ "타임스탬프, 로깅이 발생한 줄, 로그 메시지 자체 " +#~ "등 관련 정보를 포함합니다. 이러한 방식으로 로거는 " +#~ "일반적으로 다음과 같은 정보를 터미널에 표시합니다:" -#~ msgid ":py:obj:`partition_id `\\" -#~ msgstr ":py:obj:`partition_id `\\" +#~ msgid "Saving log to file" +#~ msgstr "파일에 로그 저장" -#~ msgid "An identifier telling which data partition a ClientApp should use." -#~ msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ "``fl.server.start_server``) and when using the" +#~ " ``VirtualClientEngine`` (i.e. when you do" +#~ " ``fl.simulation.start_simulation``). In some " +#~ "situations you might want to save " +#~ "this log to disk. You can do " +#~ "so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" +#~ msgstr "" +#~ "기본적으로 Flower 로그는 Federated 학습 워크로드를 " +#~ "실행하는 터미널에 출력됩니다. 이는 gRPC 기반 " +#~ "페더레이션(즉,:code:`fl.simulation.start_simulation`를 실행하는 경우)과" +#~ " :code:`VirtualClientEngine`을 사용하는 경우(즉, " +#~ ":코드:`fl.simulation.start_simulation`을 실행하는 경우) 모두에" +#~ " 적용됩니다. 경우에 따라 이 로그를 디스크에 저장하고" +#~ " 싶을 수도 있습니다. 
이 경우 " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " 함수를 호출하여 저장할 수 있습니다. 예를 들어:" -#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" -#~ msgstr "" +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to ``log.txt``. This file will be" +#~ " created in the same directory as " +#~ "were you are running the code " +#~ "from. If we inspect we see the " +#~ "log above is also recorded but " +#~ "prefixing with ``identifier`` each line:" +#~ msgstr "" +#~ "위와 같이 하면 Flower는 터미널에 표시되는 로그를 " +#~ ":code:`log.txt`에 기록합니다. 이 파일은 코드를 실행한" +#~ " 디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보면 위의 " +#~ "로그도 기록되지만 각 줄 앞에 :code:`identifier` " +#~ "접두사가 붙는 것을 확인할 수 있습니다:" + +#~ msgid "Log your own messages" +#~ msgstr "나만의 메시지 기록" -#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgid "" +#~ "You might expand the information shown" +#~ " by default with the Flower logger" +#~ " by adding more messages relevant to" +#~ " your application. You can achieve " +#~ "this easily as follows." #~ msgstr "" +#~ "애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에" +#~ " 기본적으로 표시되는 정보를 확장할 수 있습니다. 다음과" +#~ " 같이 쉽게 추가할 수 있습니다." -#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." -#~ msgstr "Flower SuperLink(Driver API 및 Fleet API)를 실행하세요." +#~ msgid "" +#~ "In this way your logger will show," +#~ " in addition to the default messages," +#~ " the ones introduced by the clients" +#~ " as specified above." +#~ msgstr "이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메시지가 표시됩니다." + +#~ msgid "Log to a remote service" +#~ msgstr "원격 서비스에 로그인" #~ msgid "" -#~ ":py:obj:`LegacyContext `\\ " -#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" -#~ msgstr "" +#~ "The ``fl.common.logger.configure`` function, also" +#~ " allows specifying a host to which" +#~ " logs can be pushed (via ``POST``)" +#~ " through a native Python " +#~ "``logging.handler.HTTPHandler``. 
This is a " +#~ "particularly useful feature in ``gRPC``-based" +#~ " Federated Learning workloads where " +#~ "otherwise gathering logs from all " +#~ "entities (i.e. the server and the " +#~ "clients) might be cumbersome. Note that" +#~ " in Flower simulation, the server " +#~ "automatically displays all logs. You can" +#~ " still specify a ``HTTPHandler`` should " +#~ "you wish to backup or analyze the" +#~ " logs somewhere else." +#~ msgstr "" +#~ "또한 :code:`fl.common.logger.configure` 함수를 사용하면 " +#~ "네이티브 Python :code:`logging.handler.HTTPHandler`를 통해" +#~ " 로그를 푸시할 수 있는 호스트를 지정할 수 " +#~ "있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버" +#~ " 및 클라이언트)에서 로그를 수집하는 것이 번거로울 수" +#~ " 있는 :code:`gRPC` 기반 Federated 학습 " +#~ "워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 " +#~ "서버가 모든 로그를 자동으로 표시합니다. 로그를 다른 " +#~ "곳에 백업하거나 분석하려는 경우 :code:`HTTPHandler`를 " +#~ "지정할 수 있습니다." + +#~ msgid "Monitor simulation" +#~ msgstr "모니터 시뮬레이션" -#~ msgid ":py:obj:`flwr.server.strategy `\\" -#~ msgstr "" +#~ msgid "" +#~ "Flower allows you to monitor system " +#~ "resources while running your simulation. " +#~ "Moreover, the Flower simulation engine " +#~ "is powerful and enables you to " +#~ "decide how to allocate resources per " +#~ "client manner and constrain the total" +#~ " usage. Insights from resource consumption" +#~ " can help you make smarter decisions" +#~ " and speed up the execution time." +#~ msgstr "" +#~ "Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 " +#~ "리소스를 모니터링할 수 있습니다. 또한 Flower 시뮬레이션" +#~ " 엔진은 강력하며 클라이언트별 리소스 할당 방법을 " +#~ "결정하고 총 사용량을 제한할 수 있습니다. 리소스 " +#~ "소비에 대한 인사이트를 통해 더 현명한 결정을 " +#~ "내리고 실행 시간을 단축할 수 있습니다." -#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgid "" +#~ "The specific instructions assume you are" +#~ " using macOS and have the `Homebrew" +#~ " `_ package manager installed." #~ msgstr "" +#~ "구체적인 지침은 macOS를 사용 중이고 'Homebrew " +#~ "`_ 패키지 관리자가 설치되어 있다고 " +#~ "가정합니다." 
-#~ msgid "run\\_driver\\_api" -#~ msgstr "" +#~ msgid "Downloads" +#~ msgstr "다운로드" -#~ msgid "run\\_fleet\\_api" -#~ msgstr "" +#~ msgid "" +#~ "`Prometheus `_ is used " +#~ "for data collection, while `Grafana " +#~ "`_ will enable you to" +#~ " visualize the collected data. They " +#~ "are both well integrated with `Ray " +#~ "`_ which Flower uses " +#~ "under the hood." +#~ msgstr "" +#~ "`Prometheus `_는 데이터 수집에 " +#~ "사용되며, `Grafana `_는 수집된 " +#~ "데이터를 시각화할 수 있게 해줍니다. 이 두 도구는" +#~ " 모두 Flower가 내부적으로 사용하는 `Ray " +#~ "`_와 잘 통합되어 있습니다." #~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg+ configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" -#~ msgstr "" +#~ "Overwrite the configuration files (depending" +#~ " on your device, it might be " +#~ "installed on a different path)." +#~ msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." -#~ msgid "key shares." -#~ msgstr "" +#~ msgid "If you are on an M1 Mac, it should be:" +#~ msgstr "M1 Mac을 사용 중이라면:" + +#~ msgid "On the previous generation Intel Mac devices, it should be:" +#~ msgstr "이전 세대 Intel Mac 장치에서는:" #~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" -#~ msgstr "" +#~ "Open the respective configuration files " +#~ "and change them. Depending on your " +#~ "device, use one of the two " +#~ "following commands:" +#~ msgstr "각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니다:" #~ msgid "" -#~ ":py:obj:`start_simulation `\\" -#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ "and then delete all the text in" +#~ " the file and paste a new " +#~ "Prometheus config you see below. 
You " +#~ "may adjust the time intervals to " +#~ "your requirements:" #~ msgstr "" +#~ "를 입력한 다음 파일의 모든 텍스트를 삭제하고 " +#~ "아래에 표시된 새 Prometheus 설정을 붙여넣습니다. " +#~ "요구 사항에 따라 시간 간격을 조정할 수 있습니다:" #~ msgid "" -#~ "'A dictionary, e.g {\"\": , " -#~ "\"\": } to configure a " -#~ "backend. Values supported in are" -#~ " those included by " -#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ "Now after you have edited the " +#~ "Prometheus configuration, do the same " +#~ "with the Grafana configuration files. " +#~ "Open those using one of the " +#~ "following commands as before:" #~ msgstr "" +#~ "이제 Prometheus 구성을 편집한 후 Grafana 구성" +#~ " 파일에 대해서도 동일한 작업을 수행합니다. 이전과 " +#~ "마찬가지로 다음 명령 중 하나를 사용하여 파일을 " +#~ "엽니다:" #~ msgid "" -#~ "When diabled, only INFO, WARNING and " -#~ "ERROR log messages will be shown. " -#~ "If enabled, DEBUG-level logs will " -#~ "be displayed." -#~ msgstr "" +#~ "Your terminal editor should open and " +#~ "allow you to apply the following " +#~ "configuration as before." +#~ msgstr "터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "`str` argument called `cid`. It should" -#~ " return a single client instance of" -#~ " type Client. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not attempt to carry state " -#~ "over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset, hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." -#~ msgstr "" +#~ "Congratulations, you just downloaded all " +#~ "the necessary software needed for " +#~ "metrics tracking. Now, let’s start it." +#~ msgstr "축하합니다. 
매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이제 시작해 보겠습니다." + +#~ msgid "Tracking metrics" +#~ msgstr "매트릭 트래킹" #~ msgid "" -#~ "The total number of clients in " -#~ "this simulation. This must be set " -#~ "if `clients_ids` is not set and " -#~ "vice-versa." -#~ msgstr "" +#~ "Before running your Flower simulation, " +#~ "you have to start the monitoring " +#~ "tools you have just installed and " +#~ "configured." +#~ msgstr "Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해야 합니다." #~ msgid "" -#~ "List `client_id`s for each client. This" -#~ " is only required if `num_clients` is" -#~ " not set. Setting both `num_clients` " -#~ "and `clients_ids` with `len(clients_ids)` not" -#~ " equal to `num_clients` generates an " -#~ "error." -#~ msgstr "" +#~ "Please include the following argument in" +#~ " your Python code when starting a " +#~ "simulation." +#~ msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." + +#~ msgid "Now, you are ready to start your workload." +#~ msgstr "이제 워크로드를 시작할 준비가 되었습니다." #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Convolutional Neural " -#~ "Network on CIFAR10 using Flower and " -#~ "PyTorch." -#~ msgstr "" +#~ "Shortly after the simulation starts, you" +#~ " should see the following logs in " +#~ "your terminal:" +#~ msgstr "시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" + +#~ msgid "You can look at everything at http://127.0.0.1:8265 ." +#~ msgstr "``_ 에서 모든 것을 볼 수 있습니다." #~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight-updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." -#~ msgstr "" +#~ "It's a Ray Dashboard. You can " +#~ "navigate to Metrics (on the left " +#~ "panel, the lowest option)." 
+#~ msgstr "Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니다." #~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started. We first need to install " -#~ "Flower. You can do this by running" -#~ " :" -#~ msgstr "" +#~ "Or alternatively, you can just see " +#~ "them in Grafana by clicking on the" +#~ " right-up corner, “View in Grafana”." +#~ " Please note that the Ray dashboard" +#~ " is only accessible during the " +#~ "simulation. After the simulation ends, " +#~ "you can only use Grafana to " +#~ "explore the metrics. You can start " +#~ "Grafana by going to " +#~ "``http://localhost:3000/``." +#~ msgstr "" +#~ "또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 " +#~ "클릭하여 Grafana에서 바로 확인할 수도 있습니다. Ray" +#~ " 대시보드는 시뮬레이션 중에만 액세스할 수 있다는 점에" +#~ " 유의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여" +#~ " 메트릭을 탐색할 수만 있습니다. " +#~ "``http://localhost:3000/``로 이동하여 Grafana를 시작할 " +#~ "수 있습니다." #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead and install PyTorch and " -#~ "the **torchvision** library:" +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port ``3000`` on your" +#~ " machine as long as they are " +#~ "running." #~ msgstr "" +#~ "시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다." +#~ " 그렇지 않으면 실행 중인 동안 컴퓨터에서 포트 " +#~ ":code:`3000` 등을 차단하므로 이 작업이 중요합니다." + +#~ msgid "Resource allocation" +#~ msgstr "리소스 할당" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Deep Learning with" -#~ " PyTorch " -#~ "`_." -#~ msgstr "" +#~ "You must understand how the Ray " +#~ "library works to efficiently allocate " +#~ "system resources to simulation clients " +#~ "on your own." 
+#~ msgstr "Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리소스를 효율적으로 할당할 수 있습니다." #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and PyTorch related " -#~ "packages:" -#~ msgstr "" +#~ "Initially, the simulation (which Ray " +#~ "handles under the hood) starts by " +#~ "default with all the available resources" +#~ " on the system, which it shares " +#~ "among the clients. It doesn't mean " +#~ "it divides it equally among all of" +#~ " them, nor that the model training" +#~ " happens at all of them " +#~ "simultaneously. You will learn more " +#~ "about that in the later part of" +#~ " this blog. You can check the " +#~ "system resources by running the " +#~ "following:" +#~ msgstr "" +#~ "처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서" +#~ " 사용 가능한 모든 리소스를 사용하여 시작되며, 이" +#~ " 리소스는 클라이언트 간에 공유됩니다. 그렇다고 해서 " +#~ "모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 " +#~ "동시에 모델 학습이 이루어지는 것은 아닙니다. 이에 " +#~ "대한 자세한 내용은 이 블로그의 뒷부분에서 설명합니다. " +#~ "다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" + +#~ msgid "In Google Colab, the result you see might be similar to this:" +#~ msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" -#~ msgid "In addition, we define the device allocation in PyTorch with:" -#~ msgstr "" +#~ msgid "" +#~ "However, you can overwrite the defaults." +#~ " When starting a simulation, do the" +#~ " following (you don't need to " +#~ "overwrite all of them):" +#~ msgstr "그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다(모두 덮어쓸 필요는 없음):" + +#~ msgid "Let’s also specify the resource for a single client." +#~ msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." #~ msgid "" -#~ "We use PyTorch to load CIFAR10, a" -#~ " popular colored image classification " -#~ "dataset for machine learning. The " -#~ "PyTorch :code:`DataLoader()` downloads the " -#~ "training and test data that are " -#~ "then normalized." +#~ "Now comes the crucial part. Ray " +#~ "will start a new client only when" +#~ " it has all the required resources" +#~ " (such that they run in parallel) " +#~ "when the resources allow." 
#~ msgstr "" +#~ "이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는" +#~ " 경우에만 필요한 모든 리소스가 있을 때(병렬로 실행되는" +#~ " 등) 새 클라이언트를 시작합니다." #~ msgid "" -#~ "Define the loss and optimizer with " -#~ "PyTorch. The training of the dataset " -#~ "is done by looping over the " -#~ "dataset, measure the corresponding loss " -#~ "and optimize it." -#~ msgstr "" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ "``client_num_gpus = 0.5`` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ "``client_num_gpus = 2``, the simulation " +#~ "wouldn't start (even if you had 2" +#~ " GPUs but decided to set 1 in" +#~ " ``ray_init_args``)." +#~ msgstr "" +#~ "위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 " +#~ "동시에 실행되지 않습니다. :code:`client_num_gpus = " +#~ "0.5` 를 설정하면 두 개의 클라이언트를 실행할 " +#~ "수 있으므로 동시에 실행할 수 있습니다. 사용 " +#~ "가능한 리소스보다 더 많은 리소스를 요구하지 않도록 " +#~ "주의하세요. :code:`client_num_gpus = 2`를 지정하면 " +#~ "시뮬레이션이 시작되지 않습니다(GPU가 2개이지만 " +#~ ":code:`ray_init_args`에서 1개를 설정한 경우에도 마찬가지입니다)." + +#~ msgid "Q: I don't see any metrics logged." +#~ msgstr "질문: 기록된 메트릭이 보이지 않습니다." #~ msgid "" -#~ "Define then the validation of the " -#~ "machine learning network. We loop over" -#~ " the test set and measure the " -#~ "loss and accuracy of the test set." +#~ "A: The timeframe might not be " +#~ "properly set. The setting is in " +#~ "the top right corner (\"Last 30 " +#~ "minutes\" by default). Please change the" +#~ " timeframe to reflect the period when" +#~ " the simulation was running." #~ msgstr "" +#~ "A: 기간이 제대로 설정되지 않았을 수 있습니다. " +#~ "설정은 오른쪽 상단에 있습니다(기본값은 '지난 30분'). " +#~ "시뮬레이션이 실행된 기간을 반영하도록 기간을 변경해 주세요." #~ msgid "" -#~ "After defining the training and testing" -#~ " of a PyTorch machine learning model," -#~ " we use the functions for the " -#~ "Flower clients." +#~ "Q: I see “Grafana server not " +#~ "detected. 
Please make sure the Grafana" +#~ " server is running and refresh this" +#~ " page” after going to the Metrics " +#~ "tab in Ray Dashboard." #~ msgstr "" +#~ "질문: \"Grafana 서버가 감지되지 않았습니다. Ray " +#~ "대시보드의 메트릭 탭으로 이동한 후 Grafana 서버가" +#~ " 실행 중인지 확인하고 이 페이지를 새로고침하세요.\"라는 " +#~ "메시지가 표시됩니다." #~ msgid "" -#~ "The Flower clients will use a " -#~ "simple CNN adapted from 'PyTorch: A " -#~ "60 Minute Blitz':" -#~ msgstr "" +#~ "A: You probably don't have Grafana " +#~ "running. Please check the running " +#~ "services" +#~ msgstr "A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" #~ msgid "" -#~ "After loading the data set with " -#~ ":code:`load_data()` we define the Flower " -#~ "interface." +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to http://127.0.0.1:8265." #~ msgstr "" +#~ "Q: ``_로 이동할 때 \"이 " +#~ "사이트에 연결할 수 없습니다.\"라는 메시지가 표시됩니다." #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses " -#~ "PyTorch. Implementing :code:`NumPyClient` usually" -#~ " means defining the following methods " -#~ "(:code:`set_parameters` is optional though):" -#~ msgstr "" +#~ "A: Either the simulation has already " +#~ "finished, or you still need to " +#~ "start Prometheus." +#~ msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." -#~ msgid "receive the updated local model weights" -#~ msgstr "" +#~ msgid "Resources" +#~ msgstr "리소스" -#~ msgid "which can be implemented in the following way:" +#~ msgid "" +#~ "Ray Dashboard: https://docs.ray.io/en/latest/ray-" +#~ "observability/getting-started.html" #~ msgstr "" +#~ "Ray 대시보드: ``_" + +#~ msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +#~ msgstr "Ray 메트릭: ``_" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. 
The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-" -#~ "pytorch`." -#~ msgstr "" +#~ "The ``VirtualClientEngine`` schedules, launches " +#~ "and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the ``VirtualClientEngine`` are:" +#~ msgstr "" +#~ ":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약," +#~ " 실행 및 관리합니다. 이러한 클라이언트는 `non-" +#~ "virtual` 클라이언트(예: `flwr.client.start_client `_ 명령을 통해 " +#~ "실행하는 클라이언트)와 동일하며, `flwr.client.NumPyClient " +#~ "`_에서 상속하는" +#~ " 클래스 생성으로 구성될 수 있으므로 동일한 방식으로" +#~ " 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 " +#~ "의해 관리되는 클라이언트는 다음과 같습니다:" #~ msgid "" -#~ "In this example, we split the " -#~ "dataset into two partitions with uniform" -#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " -#~ "Then, we load the partition for " -#~ "the given client based on " -#~ ":code:`node_id`:" -#~ msgstr "" +#~ "Running Flower simulations still require " +#~ "you to define your client class, a" +#~ " strategy, and utility functions to " +#~ "download and load (and potentially " +#~ "partition) your dataset. With that out" +#~ " of the way, launching your " +#~ "simulation is done with `start_simulation " +#~ "`_ " +#~ "and a minimal example looks as " +#~ "follows:" +#~ msgstr "" +#~ "Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, " +#~ "전략 및 유틸리티 함수를 정의하여 데이터 세트를 " +#~ "다운로드하고 로드(및 파티션)해야 합니다. 
이 작업을 마친" +#~ " 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면" +#~ " 되며, 최소한의 예시는 다음과 같습니다:" #~ msgid "" -#~ "The :code:`self.bst` is used to keep " -#~ "the Booster objects that remain " -#~ "consistent across rounds, allowing them " -#~ "to store predictions from trees " -#~ "integrated in earlier rounds and " -#~ "maintain other essential data structures " -#~ "for training." -#~ msgstr "" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the ``ray_init_args`` input argument " +#~ "to ``start_simulation`` which the VCE " +#~ "internally passes to Ray's ``ray.init`` " +#~ "command. For a complete list of " +#~ "settings you can configure check the " +#~ "`ray.init `_ documentation. " +#~ "Do not set ``ray_init_args`` if you " +#~ "want the VCE to use all your " +#~ "system's CPUs and GPUs." +#~ msgstr "" +#~ "기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU," +#~ " 모든 GPU 등)에 액세스할 수 있으며, 이는 " +#~ "Ray를 시작할 때의 기본 동작이기도 합니다. 그러나 " +#~ "일부 설정에서는 시뮬레이션에 사용되는 시스템 리소스의 수를" +#~ " 제한하고 싶을 수 있습니다. 이 설정은 VCE가 " +#~ "내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 " +#~ ":code:`start_simulation`에 대한 :code:`ray_init_args` " +#~ "입력 인수를 통해 수행할 수 있습니다. 구성할 수" +#~ " 있는 전체 설정 목록은 `ray.init " +#~ "`_ 설명서를 확인하세요. VCE가 시스템의 모든" +#~ " CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를" +#~ " 설정하지 마세요." #~ msgid "" -#~ "In :code:`fit`, at the first round, " -#~ "we call :code:`xgb.train()` to build up" -#~ " the first set of trees. the " -#~ "returned Booster object and config are" -#~ " stored in :code:`self.bst` and " -#~ ":code:`self.config`, respectively. 
From the " -#~ "second round, we load the global " -#~ "model sent from server to " -#~ ":code:`self.bst`, and then update model " -#~ "weights on local training data with " -#~ "function :code:`local_boost` as follows:" -#~ msgstr "" +#~ "By default the ``VirtualClientEngine`` assigns" +#~ " a single CPU core (and nothing " +#~ "else) to each virtual client. This " +#~ "means that if your system has 10" +#~ " cores, that many virtual clients can" +#~ " be concurrently running." +#~ msgstr "" +#~ "기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에" +#~ " 단일 CPU 코어를 할당합니다(그 외에는 아무것도 " +#~ "할당하지 않음). 즉, 시스템에 코어가 10개인 경우 " +#~ "그만큼 많은 가상 클라이언트를 동시에 실행할 수 " +#~ "있습니다." + +#~ msgid "``num_cpus`` indicates the number of CPU cores a client would get." +#~ msgstr ":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." #~ msgid "" -#~ "Given :code:`num_local_round`, we update trees" -#~ " by calling :code:`self.bst.update` method. " -#~ "After training, the last " -#~ ":code:`N=num_local_round` trees will be " -#~ "extracted to send to the server." -#~ msgstr "" +#~ "``num_gpus`` indicates the **ratio** of " +#~ "GPU memory a client gets assigned." +#~ msgstr ":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." + +#~ msgid "Let's see a few examples:" +#~ msgstr "몇 가지 예를 살펴보겠습니다:" #~ msgid "" -#~ "In :code:`evaluate`, we call " -#~ ":code:`self.bst.eval_set` function to conduct " -#~ "evaluation on valid set. The AUC " -#~ "value will be returned." -#~ msgstr "" +#~ "To understand all the intricate details" +#~ " on how resources are used to " +#~ "schedule FL clients and how to " +#~ "define custom resources, please take a" +#~ " look at the `Ray documentation " +#~ "`_." +#~ msgstr "" +#~ "리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 " +#~ "사용자 지정 리소스를 정의하는 방법에 대한 모든 " +#~ "복잡한 세부 사항을 이해하려면 'Ray 문서 " +#~ "'를 참조하세요." #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client`and call" -#~ " :code:`fl.client.start_client()`. 
The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." -#~ msgstr "" +#~ "A few ready-to-run complete " +#~ "examples for Flower simulation in " +#~ "Tensorflow/Keras and PyTorch are provided " +#~ "in the `Flower repository " +#~ "`_. You can run " +#~ "them on Google Colab too:" +#~ msgstr "" +#~ "Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 " +#~ "몇 가지 Flower 시뮬레이션 예제는 `Flower " +#~ "레포지토리 `_에서 제공됩니다. " +#~ "Google Colab에서도 실행할 수 있습니다:" #~ msgid "" -#~ "We use two clients for this " -#~ "example. An :code:`evaluate_metrics_aggregation` " -#~ "function is defined to collect and " -#~ "wighted average the AUC values from " -#~ "clients." +#~ "`Tensorflow/Keras Simulation " +#~ "`_: 100 clients collaboratively " +#~ "train a MLP model on MNIST." #~ msgstr "" +#~ "`Tensorflow/Keras 시뮬레이션 " +#~ "`_: 100개의 클라이언트가 공동으로 MNIST에서 " +#~ "MLP 모델을 훈련합니다." #~ msgid "" -#~ "Welcome to the third part of the" -#~ " Flower federated learning tutorial. In " -#~ "previous parts of this tutorial, we " -#~ "introduced federated learning with PyTorch " -#~ "and Flower (`part 1 " -#~ "`__) and we " -#~ "learned how strategies can be used " -#~ "to customize the execution on both " -#~ "the server and the clients (`part " -#~ "2 `__)." 
+#~ "Have a copy of your dataset in " +#~ "all nodes (more about this in " +#~ ":ref:`simulation considerations `)" #~ msgstr "" +#~ "모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은" +#~ " :ref:`simulation considerations `에서 확인하세요)" #~ msgid "" -#~ "In this notebook, we'll continue to " -#~ "customize the federated learning system " -#~ "we built previously by creating a " -#~ "custom version of FedAvg (again, using" -#~ " `Flower `__ and `PyTorch " -#~ "`__)." +#~ "Pass ``ray_init_args={\"address\"=\"auto\"}`` to " +#~ "`start_simulation `_ so the " +#~ "``VirtualClientEngine`` attaches to a running" +#~ " Ray instance." #~ msgstr "" +#~ ":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation" +#~ " `_에 전달하여 " +#~ ":code:`VirtualClientEngine`이 실행 중인 Ray 인스턴스에" +#~ " 연결되도록 합니다." + +#~ msgid "Multi-node simulation good-to-know" +#~ msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" #~ msgid "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ and join " -#~ "the Flower community on Slack to " -#~ "connect, ask questions, and get help:" -#~ " `Join Slack `__" -#~ " 🌼 We'd love to hear from you" -#~ " in the ``#introductions`` channel! And " -#~ "if anything is unclear, head over " -#~ "to the ``#questions`` channel." -#~ msgstr "" +#~ "Here we list a few interesting " +#~ "functionality when running multi-node FL" +#~ " simulations:" +#~ msgstr "여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합니다:" -#~ msgid "Let's build a new ``Strategy`` from scratch!" +#~ msgid "" +#~ "User ``ray status`` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the ``VirtualClientEngine``." #~ msgstr "" +#~ "사용자는 :code:`ray status`를 통해 헤드 노드에 " +#~ "연결된 모든 노드와 :code:`VirtualClientEngine`에 사용 " +#~ "가능한 총 리소스를 확인할 수 있습니다." 
+ +#~ msgid "Considerations for simulations" +#~ msgstr "시뮬레이션 시 고려 사항" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap everything in their own " -#~ "``DataLoader``. We introduce a new " -#~ "parameter ``num_clients`` which allows us " -#~ "to call ``load_datasets`` with different " -#~ "numbers of clients." -#~ msgstr "" +#~ "We are actively working on these " +#~ "fronts so to make it trivial to" +#~ " run any FL workload with Flower " +#~ "simulation." +#~ msgstr "Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측면에서 적극적으로 노력하고 있습니다." #~ msgid "" -#~ "To implement the Flower client, we " -#~ "(again) create a subclass of " -#~ "``flwr.client.NumPyClient`` and implement the " -#~ "three methods ``get_parameters``, ``fit``, and" -#~ " ``evaluate``. Here, we also pass the" -#~ " ``cid`` to the client and use " -#~ "it log additional details:" -#~ msgstr "" +#~ "The current VCE allows you to run" +#~ " Federated Learning workloads in simulation" +#~ " mode whether you are prototyping " +#~ "simple scenarios on your personal laptop" +#~ " or you want to train a complex" +#~ " FL pipeline across multiple high-" +#~ "performance GPU nodes. While we add " +#~ "more capabilities to the VCE, the " +#~ "points below highlight some of the " +#~ "considerations to keep in mind when " +#~ "designing your FL pipeline with Flower." +#~ " We also highlight a couple of " +#~ "current limitations in our implementation." +#~ msgstr "" +#~ "현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를" +#~ " 프로토타이핑하든, 여러 고성능 GPU 노드에서 복잡한 " +#~ "FL 파이프라인을 훈련하든 상관없이 시뮬레이션 모드에서 " +#~ "Federated 학습 워크로드를 실행할 수 있습니다. " +#~ "VCE에 더 많은 기능을 추가하는 동안, 아래에서는 " +#~ "Flower로 FL 파이프라인을 설계할 때 염두에 두어야" +#~ " 할 몇 가지 사항을 강조합니다. 또한 현재 " +#~ "구현에서 몇 가지 제한 사항을 강조합니다." 
+ +#~ msgid "GPU resources" +#~ msgstr "GPU 리소스" #~ msgid "" -#~ "Let's go deeper and see what it" -#~ " takes to move from ``NumPyClient`` " -#~ "to ``Client``!" -#~ msgstr "" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key ``num_gpus`` in ``client_resources``." +#~ " This being said, Ray (used " +#~ "internally by the VCE) is by " +#~ "default:" +#~ msgstr "" +#~ "VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를" +#~ " 지정하는 클라이언트에 GPU 메모리 공유를 할당합니다. " +#~ "즉, (VCE에서 내부적으로 사용하는) Ray가 기본적으로 " +#~ "사용됩니다:" #~ msgid "" -#~ "So far, we've implemented our client " -#~ "by subclassing ``flwr.client.NumPyClient``. The " -#~ "three methods we implemented are " -#~ "``get_parameters``, ``fit``, and ``evaluate``. " -#~ "Finally, we wrap the creation of " -#~ "instances of this class in a " -#~ "function called ``client_fn``:" -#~ msgstr "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set ``num_gpus=0.5`` and " +#~ "you have two GPUs in your system" +#~ " with different (e.g. 32GB and 8GB)" +#~ " VRAM amounts, they both would run" +#~ " 2 clients concurrently." +#~ msgstr "" +#~ "GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다." +#~ " 즉, 시스템에 서로 다른(예: 32GB와 8GB) " +#~ "VRAM 용량을 가진 두 개의 GPU가 있고 " +#~ ":code:`num_gpus=0.5`를 설정하면 둘 다 동시에 2개의" +#~ " 클라이언트를 실행하게 됩니다." #~ msgid "" -#~ "We've seen this before, there's nothing" -#~ " new so far. The only *tiny* " -#~ "difference compared to the previous " -#~ "notebook is naming, we've changed " -#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " -#~ "``client_fn`` to ``numpyclient_fn``. Let's run" -#~ " it to see the output we get:" +#~ "not aware of other unrelated (i.e. " +#~ "not created by the VCE) workloads " +#~ "are running on the GPU. Two " +#~ "takeaways from this are:" #~ msgstr "" +#~ "관련 없는(즉, VCE에 의해 생성되지 않은) 다른 " +#~ "워크로드가 GPU에서 실행되고 있는지 알지 못합니다. 
여기서" +#~ " 두 가지 시사점을 얻을 수 있습니다:" #~ msgid "" -#~ "This works as expected, two clients " -#~ "are training for three rounds of " -#~ "federated learning." -#~ msgstr "" +#~ "Your Flower server might need a " +#~ "GPU to evaluate the `global model` " +#~ "after aggregation (by instance when " +#~ "making use of the `evaluate method " +#~ "`_)" +#~ msgstr "" +#~ "집계 후 '글로벌 모델'을 평가하려면 Flower 서버에" +#~ " GPU가 필요할 수 있습니다(예: `evaluate method" +#~ " `_를 사용할 때)" #~ msgid "" -#~ "Let's dive a little bit deeper and" -#~ " discuss how Flower executes this " -#~ "simulation. Whenever a client is " -#~ "selected to do some work, " -#~ "``start_simulation`` calls the function " -#~ "``numpyclient_fn`` to create an instance " -#~ "of our ``FlowerNumPyClient`` (along with " -#~ "loading the model and the data)." -#~ msgstr "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ "``CUDA_VISIBLE_DEVICES=\"\"`` when launching " +#~ "your experiment." +#~ msgstr "" +#~ "동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을" +#~ " 실행하려면, 실험을 시작할 때 " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 " +#~ "합니다." #~ msgid "" -#~ "`Check out Flower Code Examples " -#~ "`__" -#~ msgstr "" +#~ "In addition, the GPU resource limits " +#~ "passed to ``client_resources`` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." +#~ msgstr "" +#~ "또한 :code:`client_resources`에 전달된 GPU 리소스 " +#~ "제한이 '강제'되지 않아(즉, 초과할 수 있음) 클라이언트가" +#~ " 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 " +#~ "VRAM을 사용하는 상황이 발생할 수 있습니다." 
+ +#~ msgid "TensorFlow with GPUs" +#~ msgstr "GPU를 사용한 TensorFlow" #~ msgid "" -#~ "`Watch Flower Summit 2023 videos " -#~ "`__" -#~ msgstr "" +#~ "When `using a GPU with TensorFlow " +#~ "`_ nearly your " +#~ "entire GPU memory of all your GPUs" +#~ " visible to the process will be " +#~ "mapped. This is done by TensorFlow " +#~ "for optimization purposes. However, in " +#~ "settings such as FL simulations where" +#~ " we want to split the GPU into" +#~ " multiple `virtual` clients, this is " +#~ "not a desirable mechanism. Luckily we" +#~ " can disable this default behavior by" +#~ " `enabling memory growth " +#~ "`_." +#~ msgstr "" +#~ "`TensorFlow와 함께 GPU를 사용 " +#~ "`_하면 프로세스에 보이는 " +#~ "모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. " +#~ "이는 최적화 목적으로 TensorFlow에서 수행됩니다. 그러나 " +#~ "GPU를 여러 개의 '가상' 클라이언트로 분할하려는 FL" +#~ " 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 " +#~ "아닙니다. 다행히도 '메모리 증가 활성화 " +#~ "`_'를" +#~ " 통해 이 기본 동작을 비활성화할 수 있습니다." #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower, " -#~ "`Flower Datasets `__ " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." -#~ msgstr "" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of ``actor_kwargs`` we can pass" +#~ " the reserved key `\"on_actor_init_fn\"` in" +#~ " order to specify a function to " +#~ "be executed upon actor initialization. " +#~ "In this case, to enable GPU growth" +#~ " for TF workloads. It would look " +#~ "as follows:" +#~ msgstr "" +#~ "이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서" +#~ " 생성한 각 액터에서 수행해야 합니다. " +#~ ":code:`actor_kwargs`를 통해 예약 키 " +#~ "`\"on_actor_init_fn\"`을 전달하여 액터 초기화 시 " +#~ "실행할 함수를 지정할 수 있습니다. 이 경우 TF" +#~ " 워크로드에 대한 GPU 증가를 활성화합니다. 
다음과 " +#~ "같이 보입니다:" + +#~ msgid "Multi-node setups" +#~ msgstr "멀티 노드 설정" -#~ msgid "Loading the data" -#~ msgstr "" +#~ msgid "" +#~ "The VCE does not currently offer a" +#~ " way to control on which node a" +#~ " particular `virtual` client is executed." +#~ " In other words, if more than a" +#~ " single node have the resources " +#~ "needed by a client to run, then" +#~ " any of those nodes could get " +#~ "the client workload scheduled onto. " +#~ "Later in the FL process (i.e. in" +#~ " a different round) the same client" +#~ " could be executed by a different " +#~ "node. Depending on how your clients " +#~ "access their datasets, this might " +#~ "require either having a copy of " +#~ "all dataset partitions on all nodes " +#~ "or a dataset serving mechanism (e.g. " +#~ "using nfs, a database) to circumvent " +#~ "data duplication." +#~ msgstr "" +#~ "VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 " +#~ "실행할지 제어하는 방법을 제공하지 않습니다. 즉, 클라이언트가" +#~ " 실행하는 데 필요한 리소스가 하나 이상의 노드에 " +#~ "있는 경우 해당 노드 중 어느 노드에나 클라이언트" +#~ " 워크로드가 예약될 수 있습니다. FL 프로세스 " +#~ "후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 " +#~ "노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에" +#~ " 액세스하는 방식에 따라 모든 노드에 모든 데이터 " +#~ "세트 파티션의 복사본을 보유하거나 데이터 중복을 피하기 " +#~ "위해 데이터 세트 제공 메커니즘(예: nfs, 데이터베이스" +#~ " 사용)을 사용해야 할 수 있습니다." #~ msgid "" -#~ "We simulate having multiple datasets " -#~ "from multiple organizations (also called " -#~ "the \"cross-silo\" setting in federated" -#~ " learning) by splitting the original " -#~ "CIFAR-10 dataset into multiple partitions. " -#~ "Each partition will represent the data" -#~ " from a single organization. We're " -#~ "doing this purely for experimentation " -#~ "purposes, in the real world there's " -#~ "no need for data splitting because " -#~ "each organization already has their own" -#~ " data (so the data is naturally " -#~ "partitioned)." -#~ msgstr "" +#~ "By definition virtual clients are " +#~ "`stateless` due to their ephemeral " +#~ "nature. 
A client state can be " +#~ "implemented as part of the Flower " +#~ "client class but users need to " +#~ "ensure this saved to persistent storage" +#~ " (e.g. a database, disk) and that " +#~ "can be retrieve later by the same" +#~ " client regardless on which node it" +#~ " is running from. This is related " +#~ "to the point above also since, in" +#~ " some way, the client's dataset could" +#~ " be seen as a type of `state`." +#~ msgstr "" +#~ "정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 " +#~ "없음'입니다. 클라이언트 상태는 Flower 클라이언트 클래스의 " +#~ "일부로 구현할 수 있지만, 사용자는 이를 영구 " +#~ "저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 " +#~ "중인 노드와 관계없이 동일한 클라이언트가 검색할 수 " +#~ "있도록 해야 합니다. 이는 어떤 식으로든 클라이언트의 " +#~ "데이터 세트가 일종의 '상태'로 볼 수 있기 때문에" +#~ " 위의 요점과도 관련이 있습니다." #~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server." +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ "``start_server`` function. It is usually " +#~ "recommended to adjust a few parameters" +#~ " during instantiation:" #~ msgstr "" +#~ "이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 " +#~ "생성되어 :code:`start_server` 함수에 전달됩니다. 일반적으로 " +#~ "인스턴스화 중에 몇 가지 매개변수를 조정하는 것이 " +#~ "좋습니다:" + +#~ msgid "Legacy example guides" +#~ msgstr "레거시 예제 가이드" + +#~ msgid "flwr is the Flower command line interface." +#~ msgstr "Flower ClientProxy 인스턴스 등록 해제." + +#~ msgid "Options" +#~ msgstr "해결법" + +#~ msgid "Install completion for the current shell." +#~ msgstr "현재 실행에 대한 식별자입니다." #~ msgid "" -#~ "Let's now create the Federated Dataset" -#~ " abstraction that from ``flwr-datasets``" -#~ " that partitions the CIFAR-10. 
We " -#~ "will create small training and test " -#~ "set for each edge device and wrap" -#~ " each of them into a PyTorch " -#~ "``DataLoader``:" +#~ "Show completion for the current shell," +#~ " to copy it or customize the " +#~ "installation." #~ msgstr "" -#~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4000 training examples and" -#~ " 1000 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." +#~ msgid "Build a Flower App into a Flower App Bundle (FAB)." #~ msgstr "" #~ msgid "" -#~ "Let's take a look at the first " -#~ "batch of images and labels in the" -#~ " first training set (i.e., " -#~ "``trainloaders[0]``) before we move on:" +#~ "You can run ``flwr build`` without " +#~ "any arguments to bundle the app " +#~ "located in the current directory. " +#~ "Alternatively, you can you can specify" +#~ " a path using the ``--app`` option" +#~ " to bundle an app located at " +#~ "the provided path. For example:" #~ msgstr "" -#~ msgid "" -#~ "The output above shows a random " -#~ "batch of images from the first " -#~ "``trainloader`` in our list of ten " -#~ "``trainloaders``. It also prints the " -#~ "labels associated with each image (i.e.," -#~ " one of the ten possible labels " -#~ "we've seen above). If you run the" -#~ " cell again, you should see another" -#~ " batch of images." +#~ msgid "``flwr build --app ./apps/flower-hello-world``." 
#~ msgstr "" -#~ msgid "Defining the model" +#~ msgid "Path of the Flower App to bundle into a FAB" #~ msgstr "" -#~ msgid "Training the model" +#~ msgid "Install a Flower App Bundle." +#~ msgstr "Flower 설치" + +#~ msgid "It can be ran with a single FAB file argument:" #~ msgstr "" -#~ msgid "" -#~ "We now have all the basic building" -#~ " blocks we need: a dataset, a " -#~ "model, a training function, and a " -#~ "test function. Let's put them together" -#~ " to train the model on the " -#~ "dataset of one of our organizations " -#~ "(``trainloaders[0]``). This simulates the " -#~ "reality of most machine learning " -#~ "projects today: each organization has " -#~ "their own data and trains models " -#~ "only on this internal data:" +#~ msgid "``flwr install ./target_project.fab``" #~ msgstr "" -#~ msgid "" -#~ "Training the simple CNN on our " -#~ "CIFAR-10 split for 5 epochs should " -#~ "result in a test set accuracy of" -#~ " about 41%, which is not good, " -#~ "but at the same time, it doesn't" -#~ " really matter for the purposes of" -#~ " this tutorial. The intent was just" -#~ " to show a simplistic centralized " -#~ "training pipeline that sets the stage" -#~ " for what comes next - federated " -#~ "learning!" +#~ msgid "The target install directory can be specified with ``--flwr-dir``:" #~ msgstr "" -#~ msgid "Updating model parameters" +#~ msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" #~ msgstr "" #~ msgid "" -#~ "In federated learning, the server sends" -#~ " the global model parameters to the" -#~ " client, and the client updates the" -#~ " local model with the parameters " -#~ "received from the server. It then " -#~ "trains the model on the local data" -#~ " (which changes the model parameters " -#~ "locally) and sends the updated/changed " -#~ "model parameters back to the server " -#~ "(or, alternatively, it sends just the" -#~ " gradients back to the server, not" -#~ " the full model parameters)." 
+#~ "This will install ``target_project`` to " +#~ "``./docs/flwr/``. By default, ``flwr-dir`` " +#~ "is equal to:" #~ msgstr "" -#~ msgid "" -#~ "The details of how this works are" -#~ " not really important here (feel free" -#~ " to consult the PyTorch documentation " -#~ "if you want to learn more). In " -#~ "essence, we use ``state_dict`` to access" -#~ " PyTorch model parameter tensors. The " -#~ "parameter tensors are then converted " -#~ "to/from a list of NumPy ndarray's " -#~ "(which Flower knows how to " -#~ "serialize/deserialize):" +#~ msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" #~ msgstr "" -#~ msgid "Implementing a Flower client" +#~ msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" #~ msgstr "" -#~ msgid "" -#~ "With that out of the way, let's" -#~ " move on to the interesting part. " -#~ "Federated learning systems consist of a" -#~ " server and multiple clients. In " -#~ "Flower, we create clients by " -#~ "implementing subclasses of ``flwr.client.Client``" -#~ " or ``flwr.client.NumPyClient``. We use " -#~ "``NumPyClient`` in this tutorial because " -#~ "it is easier to implement and " -#~ "requires us to write less boilerplate." +#~ msgid "``$HOME/.flwr/`` in all other cases" #~ msgstr "" -#~ msgid "" -#~ "To implement the Flower client, we " -#~ "create a subclass of " -#~ "``flwr.client.NumPyClient`` and implement the " -#~ "three methods ``get_parameters``, ``fit``, and" -#~ " ``evaluate``:" +#~ msgid "The desired install path." +#~ msgstr "" + +#~ msgid "Optional argument" +#~ msgstr "선택적 개선 사항" + +#~ msgid "The source FAB file to install." #~ msgstr "" -#~ msgid "" -#~ "``fit``: Receive model parameters from " -#~ "the server, train the model parameters" -#~ " on the local data, and return " -#~ "the (updated) model parameters to the" -#~ " server" +#~ msgid "Get logs from a Flower project run." 
#~ msgstr "" -#~ msgid "" -#~ "``evaluate``: Receive model parameters from" -#~ " the server, evaluate the model " -#~ "parameters on the local data, and " -#~ "return the evaluation result to the " -#~ "server" +#~ msgid "Flag to stream or print logs from the Flower run" #~ msgstr "" -#~ msgid "" -#~ "Our class ``FlowerClient`` defines how " -#~ "local training/evaluation will be performed" -#~ " and allows Flower to call the " -#~ "local training/evaluation through ``fit`` and" -#~ " ``evaluate``. Each instance of " -#~ "``FlowerClient`` represents a *single client*" -#~ " in our federated learning system. " -#~ "Federated learning systems have multiple " -#~ "clients (otherwise, there's not much to" -#~ " federate), so each client will be" -#~ " represented by its own instance of" -#~ " ``FlowerClient``. If we have, for " -#~ "example, three clients in our workload," -#~ " then we'd have three instances of" -#~ " ``FlowerClient``. Flower calls " -#~ "``FlowerClient.fit`` on the respective " -#~ "instance when the server selects a " -#~ "particular client for training (and " -#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgid "default" #~ msgstr "" -#~ msgid "Using the Virtual Client Engine" +#~ msgid "``True``" +#~ msgstr "``DISTRO``" + +#~ msgid "Required argument" +#~ msgstr "빌드 전달인자" + +#~ msgid "The Flower run ID to query" +#~ msgstr "Flower 커뮤니티 가입하기" + +#~ msgid "Path of the Flower project to run" #~ msgstr "" -#~ msgid "" -#~ "In this notebook, we want to " -#~ "simulate a federated learning system " -#~ "with 10 clients on a single " -#~ "machine. This means that the server " -#~ "and all 10 clients will live on" -#~ " a single machine and share resources" -#~ " such as CPU, GPU, and memory. " -#~ "Having 10 clients would mean having " -#~ "10 instances of ``FlowerClient`` in " -#~ "memory. 
Doing this on a single " -#~ "machine can quickly exhaust the " -#~ "available memory resources, even if only" -#~ " a subset of these clients " -#~ "participates in a single round of " -#~ "federated learning." +#~ msgid "Name of the federation to run the app on" #~ msgstr "" +#~ msgid "Create new Flower App." +#~ msgstr "Flower 서버를 실행하세요." + +#~ msgid "The ML framework to use" +#~ msgstr "" + +#~ msgid "options" +#~ msgstr "해결법" + #~ msgid "" -#~ "In addition to the regular capabilities" -#~ " where server and clients run on " -#~ "multiple machines, Flower, therefore, provides" -#~ " special simulation capabilities that " -#~ "create ``FlowerClient`` instances only when" -#~ " they are actually necessary for " -#~ "training or evaluation. To enable the" -#~ " Flower framework to create clients " -#~ "when necessary, we need to implement " -#~ "a function called ``client_fn`` that " -#~ "creates a ``FlowerClient`` instance on " -#~ "demand. Flower calls ``client_fn`` whenever" -#~ " it needs an instance of one " -#~ "particular client to call ``fit`` or " -#~ "``evaluate`` (those instances are usually " -#~ "discarded after use, so they should " -#~ "not keep any local state). Clients " -#~ "are identified by a client ID, or" -#~ " short ``cid``. The ``cid`` can be" -#~ " used, for example, to load different" -#~ " local data partitions for different " -#~ "clients, as can be seen below:" +#~ "PyTorch | TensorFlow | sklearn | " +#~ "HuggingFace | JAX | MLX | NumPy" +#~ " | FlowerTune | Flower Baseline" #~ msgstr "" -#~ msgid "Starting the training" +#~ msgid "The Flower username of the author" #~ msgstr "" -#~ msgid "" -#~ "We now have the class ``FlowerClient``" -#~ " which defines client-side " -#~ "training/evaluation and ``client_fn`` which " -#~ "allows Flower to create ``FlowerClient`` " -#~ "instances whenever it needs to call " -#~ "``fit`` or ``evaluate`` on one " -#~ "particular client. 
The last step is " -#~ "to start the actual simulation using " -#~ "``flwr.simulation.start_simulation``." +#~ msgid "The name of the Flower App" +#~ msgstr "Flower 기본 이미지의 태그." + +#~ msgid "Run Flower App." +#~ msgstr "Flower 서버를 실행하세요." + +#~ msgid "Override configuration key-value pairs, should be of the format:" #~ msgstr "" #~ msgid "" -#~ "The function ``start_simulation`` accepts a" -#~ " number of arguments, amongst them " -#~ "the ``client_fn`` used to create " -#~ "``FlowerClient`` instances, the number of " -#~ "clients to simulate (``num_clients``), the " -#~ "number of federated learning rounds " -#~ "(``num_rounds``), and the strategy. The " -#~ "strategy encapsulates the federated learning" -#~ " approach/algorithm, for example, *Federated " -#~ "Averaging* (FedAvg)." +#~ "`--run-config 'key1=\"value1\" key2=\"value2\"' " +#~ "--run-config 'key3=\"value3\"'`" #~ msgstr "" #~ msgid "" -#~ "Flower has a number of built-in" -#~ " strategies, but we can also use " -#~ "our own strategy implementations to " -#~ "customize nearly all aspects of the " -#~ "federated learning approach. For this " -#~ "example, we use the built-in " -#~ "``FedAvg`` implementation and customize it " -#~ "using a few basic parameters. The " -#~ "last step is the actual call to" -#~ " ``start_simulation`` which - you guessed" -#~ " it - starts the simulation:" +#~ "Note that `key1`, `key2`, and `key3` " +#~ "in this example need to exist " +#~ "inside the `pyproject.toml` in order to" +#~ " be properly overriden." #~ msgstr "" #~ msgid "" -#~ "When we call ``start_simulation``, we " -#~ "tell Flower that there are 10 " -#~ "clients (``num_clients=10``). Flower then goes" -#~ " ahead an asks the ``FedAvg`` " -#~ "strategy to select clients. ``FedAvg`` " -#~ "knows that it should select 100% " -#~ "of the available clients " -#~ "(``fraction_fit=1.0``), so it goes ahead " -#~ "and selects 10 random clients (i.e., " -#~ "100% of 10)." 
+#~ "Use `--stream` with `flwr run` to " +#~ "display logs; logs are not streamed " +#~ "by default." #~ msgstr "" -#~ msgid "" -#~ "Flower then asks the selected 10 " -#~ "clients to train the model. When " -#~ "the server receives the model parameter" -#~ " updates from the clients, it hands" -#~ " those updates over to the strategy" -#~ " (*FedAvg*) for aggregation. The strategy" -#~ " aggregates those updates and returns " -#~ "the new global model, which then " -#~ "gets used in the next round of " -#~ "federated learning." +#~ msgid "``False``" +#~ msgstr "``flwr/base``" + +#~ msgid "Path of the Flower App to run." +#~ msgstr "Flower 기본 이미지의 태그." + +#~ msgid "Name of the federation to run the app on." #~ msgstr "" #~ msgid "" -#~ "The only thing left to do is " -#~ "to tell the strategy to call this" -#~ " function whenever it receives evaluation" -#~ " metric dictionaries from the clients:" +#~ "Note that since version ``1.11.0``, " +#~ "``flower-server-app`` no longer supports" +#~ " passing a reference to a `ServerApp`" +#~ " attribute. Instead, you need to pass" +#~ " the path to Flower app via the" +#~ " argument ``--app``. This is the path" +#~ " to a directory containing a " +#~ "`pyproject.toml`. You can create a valid" +#~ " Flower app by executing ``flwr new``" +#~ " and following the prompt." #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll begin to " -#~ "customize the federated learning system " -#~ "we built in the introductory notebook" -#~ " (again, using `Flower `__" -#~ " and `PyTorch `__)." +#~ "A config (key/value mapping) held by " +#~ "the entity in a given run and " +#~ "that will stay local. It can be" +#~ " used at any point during the " +#~ "lifecycle of this entity (e.g. across" +#~ " multiple rounds)" #~ msgstr "" -#~ msgid "Let's move beyond FedAvg with Flower strategies!" 
+#~ msgid "" +#~ ":py:obj:`RUN_SUPEREXEC_ENTER " +#~ "`\\" #~ msgstr "" +#~ ":py:obj:`RUN_SUPEREXEC_ENTER " +#~ "`\\" #~ msgid "" -#~ "Flower, by default, initializes the " -#~ "global model by asking one random " -#~ "client for the initial parameters. In" -#~ " many cases, we want more control " -#~ "over parameter initialization though. Flower" -#~ " therefore allows you to directly " -#~ "pass the initial parameters to the " -#~ "Strategy:" +#~ ":py:obj:`RUN_SUPEREXEC_LEAVE " +#~ "`\\" #~ msgstr "" +#~ ":py:obj:`RUN_SUPEREXEC_LEAVE " +#~ "`\\" -#~ msgid "" -#~ "Passing ``initial_parameters`` to the " -#~ "``FedAvg`` strategy prevents Flower from " -#~ "asking one of the clients for the" -#~ " initial parameters. If we look " -#~ "closely, we can see that the logs" -#~ " do not show any calls to the" -#~ " ``FlowerClient.get_parameters`` method." +#~ msgid "Log error stating that module `ray` could not be imported." #~ msgstr "" #~ msgid "" -#~ "We've seen the function ``start_simulation``" -#~ " before. It accepts a number of " -#~ "arguments, amongst them the ``client_fn`` " -#~ "used to create ``FlowerClient`` instances, " -#~ "the number of clients to simulate " -#~ "``num_clients``, the number of rounds " -#~ "``num_rounds``, and the strategy." -#~ msgstr "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing JAX workload. " +#~ "We are using JAX to train a " +#~ "linear regression model on a scikit-" +#~ "learn dataset. We will structure the " +#~ "example similar to our `PyTorch - " +#~ "From Centralized To Federated " +#~ "`_ walkthrough. " +#~ "First, we build a centralized training" +#~ " approach based on the `Linear " +#~ "Regression with JAX " +#~ "`_" +#~ " tutorial`. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." +#~ msgstr "" +#~ "이 튜토리얼에서는 Flower를 사용하여 기존 JAX " +#~ "워크로드의 연합 버전을 구축하는 방법을 보여드립니다. 
JAX를" +#~ " 사용해 scikit-learn 데이터 세트에서 선형 " +#~ "회귀 모델을 훈련하고 있습니다. 예제는 '파이토치 - " +#~ "Centralized에서 Federated으로 " +#~ "`_ 워크스루와 유사하게" +#~ " 구성하겠습니다. 먼저, `JAX를 사용한 선형 회귀 " +#~ "`_" +#~ " 튜토리얼`을 기반으로 centralized 학습 접근 방식을" +#~ " 구축합니다. 그런 다음 centralized 트레이닝 코드를" +#~ " 기반으로 federated 방식으로 트레이닝을 실행합니다." #~ msgid "" -#~ "Next, we'll just pass this function " -#~ "to the FedAvg strategy before starting" -#~ " the simulation:" +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " ``jax``, ``jaxlib``, ``scikit-learn``, and" +#~ " ``flwr``:" #~ msgstr "" +#~ "JAX 예제 빌드를 시작하기 전에 :code:`jax`, " +#~ ":code:`jaxlib`, :code:`scikit-learn`, :code:`flwr`" +#~ " 패키지를 설치해야 합니다:" + +#~ msgid "Linear Regression with JAX" +#~ msgstr "JAX를 사용한 선형 회귀" #~ msgid "" -#~ "We now have 1000 partitions, each " -#~ "holding 45 training and 5 validation " -#~ "examples. Given that the number of " -#~ "training examples on each client is " -#~ "quite small, we should probably train" -#~ " the model a bit longer, so we" -#~ " configure the clients to perform 3" -#~ " local training epochs. We should " -#~ "also adjust the fraction of clients " -#~ "selected for training during each round" -#~ " (we don't want all 1000 clients " -#~ "participating in every round), so we " -#~ "adjust ``fraction_fit`` to ``0.05``, which " -#~ "means that only 5% of available " -#~ "clients (so 50 clients) will be " -#~ "selected for training each round:" -#~ msgstr "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a ``Linear Regression`` model. If" +#~ " you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." +#~ msgstr "" +#~ "먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙" +#~ " 집중식 훈련 코드에 대한 간략한 설명부터 " +#~ "시작하겠습니다. 더 자세한 설명을 원하시면 공식 `JAX" +#~ " 문서 `_를 참조하세요." 
-#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" -#~ msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgid "" +#~ "Let's create a new file called " +#~ "``jax_training.py`` with all the components" +#~ " required for a traditional (centralized)" +#~ " linear regression training. First, the " +#~ "JAX packages ``jax`` and ``jaxlib`` need" +#~ " to be imported. In addition, we " +#~ "need to import ``sklearn`` since we " +#~ "use ``make_regression`` for the dataset " +#~ "and ``train_test_split`` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the ``flwr`` package " +#~ "for federated learning. This will be " +#~ "done later." +#~ msgstr "" +#~ "전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 " +#~ "구성 요소가 포함된 :code:`jax_training.py`라는 새 " +#~ "파일을 생성해 보겠습니다. 먼저, JAX 패키지인 " +#~ ":code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 " +#~ "데이터 세트에 :code:`make_regression`을 사용하고 데이터 " +#~ "세트를 학습 및 테스트 세트로 분할하기 위해 " +#~ ":code:`train_test_split`을 사용하므로 :code:`sklearn`을 " +#~ "가져와야 합니다. 연합 학습을 위해 아직 " +#~ ":code:`flwr` 패키지를 가져오지 않은 것을 볼 수" +#~ " 있습니다. 이 작업은 나중에 수행됩니다." -#~ msgid "|01471150fd5144c080a176b43e92a3ff|" -#~ msgstr "|01471150fd5144c080a176b43e92a3ff|" +#~ msgid "" +#~ "The ``load_data()`` function loads the " +#~ "mentioned training and test sets." +#~ msgstr "code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." -#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" -#~ msgstr "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgid "" +#~ "The model architecture (a very simple" +#~ " ``Linear Regression`` model) is defined" +#~ " in ``load_model()``." +#~ msgstr "모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정의되어 있습니다." -#~ msgid "|3047bbce54b34099ae559963d0420d79|" -#~ msgstr "|3047bbce54b34099ae559963d0420d79|" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function ``train()``), which loops over" +#~ " the training set and measures the" +#~ " loss (function ``loss_fn()``) for each " +#~ "batch of training examples. 
The loss " +#~ "function is separate since JAX takes " +#~ "derivatives with a ``grad()`` function " +#~ "(defined in the ``main()`` function and" +#~ " called in ``train()``)." +#~ msgstr "" +#~ "이제 훈련 집합을 반복하고 각 훈련 예제 배치에" +#~ " 대해 손실을 측정하는(함수 :code:`loss_fn()`) 훈련(함수" +#~ " :code:`train()`)을 정의해야 합니다. JAX는 " +#~ ":code:`grad()` 함수(:code:`main()` 함수에 정의되고 " +#~ ":code:`train()`에서 호출됨)로 파생물을 취하므로 손실 함수는" +#~ " 분리되어 있습니다." -#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" -#~ msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function ``evaluation()``. " +#~ "The function takes all test examples " +#~ "and measures the loss of the " +#~ "linear regression model." +#~ msgstr "" +#~ "모델의 평가는 :code:`evaluation()` 함수에 정의되어 " +#~ "있습니다. 이 함수는 모든 테스트 예제를 가져와 " +#~ "선형 회귀 모델의 손실을 측정합니다." -#~ msgid "|c24c1478b30e4f74839208628a842d1e|" -#~ msgstr "|c24c1478b30e4f74839208628a842d1e|" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the ``jax.grad()`` function" +#~ " is defined in ``main()`` and passed" +#~ " to ``train()``." +#~ msgstr "" +#~ "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 " +#~ "정의했으므로 이제 모든 것을 종합하여 JAX를 사용 " +#~ "모델을 훈련할 수 있습니다. 이미 언급했듯이 " +#~ ":code:`jax.grad()` 함수는 :code:`main()`에 정의되어 " +#~ ":code:`train()`에 전달됩니다." + +#~ msgid "You can now run your (centralized) JAX linear regression workload:" +#~ msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" -#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" -#~ msgstr "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used JAX before." +#~ " Let's take the next step and " +#~ "use what we've built to create a" +#~ " simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." 
+#~ msgstr "" +#~ "지금까지는 JAX를 사용해 본 적이 있다면 이 모든" +#~ " 것이 상당히 익숙해 보일 것입니다. 다음 단계로 " +#~ "넘어가서 우리가 구축한 것을 사용하여 하나의 서버와 " +#~ "두 개의 클라이언트로 구성된 간단한 연합 학습 " +#~ "시스템을 만들어 보겠습니다." -#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" -#~ msgstr "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgid "JAX meets Flower" +#~ msgstr "JAX와 Flower의 만남" -#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -#~ msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgid "" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in ``jax_training.py`` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." +#~ msgstr "" +#~ "기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 " +#~ "쉽습니다. 서버*를 시작한 다음 *서버*에 연결된 " +#~ "*클라이언트*에 대해 :code:`jax_training.py`의 코드를 사용해야" +#~ " 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다." +#~ " 클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된" +#~ " 파라미터는 *서버*로 다시 전송되며, 수신된 모든 " +#~ "파라미터 업데이트의 평균을 구합니다. 이는 연합 학습 " +#~ "프로세스의 한 라운드를 설명하며, 이 과정을 여러 " +#~ "라운드에 걸쳐 반복합니다." -#~ msgid "|032eb6fed6924ac387b9f13854919196|" -#~ msgstr "|032eb6fed6924ac387b9f13854919196|" +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined JAX training " +#~ "in ``jax_training.py``. Our *client* needs " +#~ "to import ``flwr``, but also ``jax`` " +#~ "and ``jaxlib`` to update the parameters" +#~ " on our JAX model:" +#~ msgstr "" +#~ "마지막으로, :code:`client.py`에서 *client* 로직을 정의하고" +#~ " :code:`jax_training.py`에서 이전에 정의한 JAX 교육을" +#~ " 기반으로 빌드합니다. 
*클라이언트*는 :code:`flwr`을 가져와야" +#~ " 하며, JAX 모델의 파라미터를 업데이트하기 위해 " +#~ ":code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" -#~ msgstr "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``FlowerClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. ``FlowerClient`` needs to implement" +#~ " four methods, two methods for " +#~ "getting/setting model parameters, one method" +#~ " for training the model, and one " +#~ "method for testing the model:" +#~ msgstr "" +#~ "Flower *클라이언트*를 구현한다는 것은 기본적으로 " +#~ ":code:`flwr.client.Client` 또는 " +#~ ":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 " +#~ "의미합니다. 구현은 :code:`flwr.client.NumPyClient`를 기반으로 " +#~ "하며, 이를 :code:`FlowerClient`라고 부를 것입니다. " +#~ ":code:`NumPyClient`는 필요한 일부 보일러플레이를 피할 수" +#~ " 있기 때문에 NumPy 상호 운용성이 좋은 " +#~ "프레임워크(예: JAX)를 사용하는 경우 :code:`Client`보다 " +#~ "구현하기가 약간 더 쉽습니다. 
code:`FlowerClient`는 모델" +#~ " 매개변수를 가져오거나 설정하는 메서드 2개, 모델 " +#~ "학습을 위한 메서드 1개, 모델 테스트를 위한 " +#~ "메서드 1개 등 총 4개의 메서드를 구현해야 " +#~ "합니다:" + +#~ msgid "``set_parameters (optional)``" +#~ msgstr ":code:`set_parameters (선택사항)`" + +#~ msgid "transform parameters to NumPy ``ndarray``'s" +#~ msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" + +#~ msgid "get the updated local model parameters and return them to the server" +#~ msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" + +#~ msgid "return the local loss to the server" +#~ msgstr "로컬 손실을 서버로 반환합니다" -#~ msgid "|7efbe3d29d8349b89594e8947e910525|" -#~ msgstr "|7efbe3d29d8349b89594e8947e910525|" +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ "``DeviceArray`` to ``NumPy ndarray`` to " +#~ "make them compatible with `NumPyClient`." +#~ msgstr "" +#~ "어려운 부분은 JAX 모델 매개변수를 " +#~ ":code:`DeviceArray`에서 :code:`NumPy ndarray`로 변환하여" +#~ " `NumPyClient`와 호환되도록 하는 것입니다." -#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" -#~ msgstr "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgid "" +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``evaluate()`` " +#~ "previously defined in ``jax_training.py``. So" +#~ " what we really do here is we" +#~ " tell Flower through our ``NumPyClient``" +#~ " subclass which of our already " +#~ "defined functions to call for training" +#~ " and evaluation. We included type " +#~ "annotations to give you a better " +#~ "understanding of the data types that " +#~ "get passed around." +#~ msgstr "" +#~ "두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 " +#~ ":code:`evaluate`는 이전에 :code:`jax_training.py`에 정의된" +#~ " 함수 :code:`train()`과 :code:`evaluate()`를 사용합니다." +#~ " 따라서 여기서 우리가 실제로 하는 일은 이미 " +#~ "정의된 함수 중 훈련과 평가를 위해 호출할 함수를" +#~ " :code:`NumPyClient` 서브클래스를 통해 Flower에게 " +#~ "알려주는 것입니다. 전달되는 데이터 유형을 더 잘 " +#~ "이해할 수 있도록 유형 type annotation을 " +#~ "포함했습니다." 
+ +#~ msgid "Having defined the federation process, we can run it." +#~ msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." -#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -#~ msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your JAX " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "" +#~ "를 입력하고(그 전에 서버가 계속 실행 중인지 " +#~ "확인하세요) 두 클라이언트에서 연합 학습을 실행하는 JAX" +#~ " 프로젝트를 확인합니다. 축하합니다!" -#~ msgid "run\\_client\\_app" -#~ msgstr "run\\_client\\_app" +#~ msgid "" +#~ "The source code of this example " +#~ "was improved over time and can be" +#~ " found here: `Quickstart JAX " +#~ "`_. Our example is somewhat over-" +#~ "simplified because both clients load the" +#~ " same dataset." +#~ msgstr "" +#~ "이 예제의 소스 코드는 시간이 지남에 따라 " +#~ "개선되었으며 여기에서 확인할 수 있습니다: 'Quickstart " +#~ "JAX `_. 두 클라이언트가 동일한 데이터 " +#~ "세트를 로드하기 때문에 이 예제는 다소 단순화되어 " +#~ "있습니다." -#~ msgid "run\\_supernode" -#~ msgstr "run\\_supernode" +#~ msgid "" +#~ "You're now prepared to explore this " +#~ "topic further. How about using a " +#~ "more sophisticated model or using a " +#~ "different dataset? How about adding more" +#~ " clients?" +#~ msgstr "" +#~ "이제 이 주제를 더 자세히 살펴볼 준비가 " +#~ "되었습니다. 더 정교한 모델을 사용하거나 다른 데이터 " +#~ "집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 " +#~ "추가하는 것은 어떨까요?" -#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a ``Logistic Regression``" +#~ " model on MNIST using Flower and " +#~ "scikit-learn." #~ msgstr "" #~ msgid "" -#~ "When there isn't an exact match, " -#~ "all the existing keys in the " -#~ "layout map will be treated as a" -#~ " regex and map against the input " -#~ "key again. The first match will be" -#~ " returned, based on the key insertion" -#~ " order. Return None if there isn't" -#~ " any match found." 
+#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this :doc:`virtualenv `." #~ msgstr "" -#~ msgid "the string key as the query for the layout." +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients* all having the " +#~ "same model." #~ msgstr "" -#~ msgid "Corresponding layout based on the query." +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual model parameter updates for " +#~ "the model based on their local " +#~ "datasets. These updates are then sent" +#~ " to the *server* which will aggregate" +#~ " them to produce an updated global" +#~ " model. Finally, the *server* sends " +#~ "this improved version of the model " +#~ "back to each *client*. A complete " +#~ "cycle of parameters updates is called" +#~ " a *round*." #~ msgstr "" -#~ msgid "run\\_server\\_app" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by " +#~ "running:" #~ msgstr "" -#~ msgid "run\\_superlink" +#~ msgid "Since we want to use scikit-learn, let's go ahead and install it:" #~ msgstr "" -#~ msgid "Start a Ray-based Flower simulation server." +#~ msgid "Or simply install all dependencies using Poetry:" #~ msgstr "" #~ msgid "" -#~ "A function creating `Client` instances. " -#~ "The function must have the signature " -#~ "`client_fn(context: Context). It should return" -#~ " a single client instance of type " -#~ "`Client`. Note that the created client" -#~ " instances are ephemeral and will " -#~ "often be destroyed after a single " -#~ "method invocation. Since client instances " -#~ "are not long-lived, they should " -#~ "not attempt to carry state over " -#~ "method invocations. Any state required " -#~ "by the instance (model, dataset, " -#~ "hyperparameters, ...) 
should be (re-)created" -#~ " in either the call to `client_fn`" -#~ " or the call to any of the " -#~ "client methods (e.g., load evaluation " -#~ "data in the `evaluate` method itself)." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within ``utils.py``. The " +#~ "``utils.py`` contains different functions " +#~ "defining all the machine learning " +#~ "basics:" #~ msgstr "" -#~ msgid "The total number of clients in this simulation." -#~ msgstr "" +#~ msgid "``get_model_parameters()``" +#~ msgstr "모델 매개변수." -#~ msgid "" -#~ "UNSUPPORTED, WILL BE REMOVED. USE " -#~ "`num_clients` INSTEAD. List `client_id`s for" -#~ " each client. This is only required" -#~ " if `num_clients` is not set. Setting" -#~ " both `num_clients` and `clients_ids` with" -#~ " `len(clients_ids)` not equal to " -#~ "`num_clients` generates an error. Using " -#~ "this argument will raise an error." +#~ msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" #~ msgstr "" -#~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. To understand the GPU" -#~ " utilization caused by `num_gpus`, as " -#~ "well as using custom resources, please" -#~ " consult the Ray documentation." +#~ msgid "``set_model_params()``" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Server`. If no instance" -#~ " is provided, then `start_server` will " -#~ "create one." +#~ msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Strategy`. 
If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgid "``set_initial_params()``" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_simulation` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgid "Initializes the model parameters that the Flower server will ask for" #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } An empty dictionary can " -#~ "be used (ray_init_args={}) to prevent " -#~ "any arguments from being passed to " -#~ "ray.init." +#~ "Please check out ``utils.py`` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the ``client.py`` and imported. The" +#~ " ``client.py`` also requires to import " +#~ "several packages such as Flower and " +#~ "scikit-learn:" #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args:" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The ``FederatedDataset.load_partition()`` method " +#~ "loads the partitioned training set for" +#~ " each partition ID defined in the " +#~ "``--partition-id`` argument." 
#~ msgstr "" -#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ "``utils.set_initial_params()``." #~ msgstr "" #~ msgid "" -#~ "An empty dictionary can be used " -#~ "(ray_init_args={}) to prevent any arguments" -#~ " from being passed to ray.init." +#~ "The Flower server interacts with clients" +#~ " through an interface called ``Client``." +#~ " When the server selects a particular" +#~ " client for training, it sends " +#~ "training instructions over the network. " +#~ "The client receives those instructions " +#~ "and calls one of the ``Client`` " +#~ "methods to run your code (i.e., to" +#~ " fit the logistic regression we " +#~ "defined earlier)." #~ msgstr "" #~ msgid "" -#~ "Set to True to prevent `ray.shutdown()`" -#~ " in case `ray.is_initialized()=True`." +#~ "Flower provides a convenience class " +#~ "called ``NumPyClient`` which makes it " +#~ "easier to implement the ``Client`` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing ``NumPyClient`` " +#~ "usually means defining the following " +#~ "methods (``set_parameters`` is optional " +#~ "though):" #~ msgstr "" -#~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "executing a ClientApp wrapping input " -#~ "argument `client_fn`." +#~ msgid "return the model weight as a list of NumPy ndarrays" #~ msgstr "" +#~ msgid "``set_parameters`` (optional)" +#~ msgstr ":code:`set_parameters (선택사항)`" + #~ msgid "" -#~ "If you want to create your own " -#~ "Actor classes, you might need to " -#~ "pass some input argument. You can " -#~ "use this dictionary for such purpose." 
+#~ "update the local model weights with " +#~ "the parameters received from the server" #~ msgstr "" -#~ msgid "" -#~ "(default: \"DEFAULT\") Optional string " -#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" -#~ " to choose in which node the " -#~ "actor is placed. If you are an " -#~ "advanced user needed more control you" -#~ " can use lower-level scheduling " -#~ "strategies to pin actors to specific " -#~ "compute nodes (e.g. via " -#~ "NodeAffinitySchedulingStrategy). Please note this" -#~ " is an advanced feature. For all " -#~ "details, please refer to the Ray " -#~ "documentation: https://docs.ray.io/en/latest/ray-" -#~ "core/scheduling/index.html" +#~ msgid "is directly imported with ``utils.set_model_params()``" #~ msgstr "" -#~ msgid "**hist** -- Object containing metrics from training." +#~ msgid "set the local model weights" #~ msgstr "" -#~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with FastAI to train a vision " -#~ "model on CIFAR-10." +#~ msgid "train the local model" #~ msgstr "" -#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgid "return the updated local model weights" +#~ msgstr "현재 로컬 모델 파라미터를 반환합니다." + +#~ msgid "test the local model" #~ msgstr "" -#~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn more." +#~ msgid "The methods can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "Check out this Federating Learning " -#~ "quickstart tutorial for using Flower " -#~ "with HuggingFace Transformers in order " -#~ "to fine-tune an LLM." +#~ "We can now create an instance of" +#~ " our class ``MnistClient`` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "Let's build a federated learning system" -#~ " using Hugging Face Transformers and " -#~ "Flower!" +#~ "That's it for the client. 
We only" +#~ " have to implement ``Client`` or " +#~ "``NumPyClient`` and call " +#~ "``fl.client.start_client()``. If you implement " +#~ "a client of type ``NumPyClient`` you'll" +#~ " need to first call its " +#~ "``to_client()`` method. The string " +#~ "``\"0.0.0.0:8080\"`` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use ``\"0.0.0.0:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we pass to the client." #~ msgstr "" #~ msgid "" -#~ "We will leverage Hugging Face to " -#~ "federate the training of language models" -#~ " over multiple clients using Flower. " -#~ "More specifically, we will fine-tune " -#~ "a pre-trained Transformer model " -#~ "(distilBERT) for sequence classification over" -#~ " a dataset of IMDB ratings. The " -#~ "end goal is to detect if a " -#~ "movie rating is positive or negative." +#~ "The following Flower server is a " +#~ "little bit more advanced and returns " +#~ "an evaluation function for the " +#~ "server-side evaluation. First, we import" +#~ " again all required libraries such as" +#~ " Flower and scikit-learn." #~ msgstr "" -#~ msgid "Dependencies" +#~ msgid "``server.py``, import Flower and start the server:" #~ msgstr "" #~ msgid "" -#~ "To follow along this tutorial you " -#~ "will need to install the following " -#~ "packages: :code:`datasets`, :code:`evaluate`, " -#~ ":code:`flwr`, :code:`torch`, and " -#~ ":code:`transformers`. This can be done " -#~ "using :code:`pip`:" +#~ "The number of federated learning rounds" +#~ " is set in ``fit_round()`` and the" +#~ " evaluation is defined in " +#~ "``get_evaluate_fn()``. The evaluation function " +#~ "is called after each federated learning" +#~ " round and gives you information " +#~ "about loss and accuracy. 
Note that " +#~ "we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." #~ msgstr "" -#~ msgid "Standard Hugging Face workflow" +#~ msgid "" +#~ "The ``main`` contains the server-side" +#~ " parameter initialization " +#~ "``utils.set_initial_params()`` as well as the" +#~ " aggregation strategy ``fl.server.strategy:FedAvg()``." +#~ " The strategy is the default one, " +#~ "federated averaging (or FedAvg), with " +#~ "two clients and evaluation after each" +#~ " federated learning round. The server " +#~ "can be started with the command " +#~ "``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))``." #~ msgstr "" -#~ msgid "Handling the data" +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We, " +#~ "therefore, have to start the server " +#~ "first:" #~ msgstr "" #~ msgid "" -#~ "To fetch the IMDB dataset, we will" -#~ " use Hugging Face's :code:`datasets` " -#~ "library. We then need to tokenize " -#~ "the data and create :code:`PyTorch` " -#~ "dataloaders, this is all done in " -#~ "the :code:`load_data` function:" +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Open a new terminal and " +#~ "start the first client:" #~ msgstr "" -#~ msgid "Training and testing the model" +#~ msgid "Open another terminal and start the second client:" #~ msgstr "" #~ msgid "" -#~ "Once we have a way of creating " -#~ "our trainloader and testloader, we can" -#~ " take care of the training and " -#~ "testing. This is very similar to " -#~ "any :code:`PyTorch` training or testing " -#~ "loop:" -#~ msgstr "" - -#~ msgid "Creating the model itself" +#~ "Each client will have its own " +#~ "dataset. 
You should now see how " +#~ "the training does in the very " +#~ "first terminal (the one that started " +#~ "the server):" #~ msgstr "" #~ msgid "" -#~ "To create the model itself, we " -#~ "will just load the pre-trained " -#~ "distillBERT model using Hugging Face’s " -#~ ":code:`AutoModelForSequenceClassification` :" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in ``examples/sklearn-logreg-" +#~ "mnist``." #~ msgstr "" -#~ msgid "Federating the example" +#~ msgid "Federated XGBoost" #~ msgstr "" -#~ msgid "Creating the IMDBClient" +#~ msgid "Why federated XGBoost?" #~ msgstr "" #~ msgid "" -#~ "To federate our example to multiple " -#~ "clients, we first need to write " -#~ "our Flower client class (inheriting from" -#~ " :code:`flwr.client.NumPyClient`). This is very" -#~ " easy, as our model is a " -#~ "standard :code:`PyTorch` model:" +#~ "Indeed, as the demand for data " +#~ "privacy and decentralized learning grows, " +#~ "there's an increasing requirement to " +#~ "implement federated XGBoost systems for " +#~ "specialised applications, like survival " +#~ "analysis and financial fraud detection." #~ msgstr "" #~ msgid "" -#~ "The :code:`get_parameters` function lets the" -#~ " server get the client's parameters. " -#~ "Inversely, the :code:`set_parameters` function " -#~ "allows the server to send its " -#~ "parameters to the client. Finally, the" -#~ " :code:`fit` function trains the model " -#~ "locally for the client, and the " -#~ ":code:`evaluate` function tests the model " -#~ "locally and returns the relevant " -#~ "metrics." +#~ "Federated learning ensures that raw data" +#~ " remains on the local device, making" +#~ " it an attractive approach for " +#~ "sensitive domains where data security " +#~ "and privacy are paramount. 
Given the " +#~ "robustness and efficiency of XGBoost, " +#~ "combining it with federated learning " +#~ "offers a promising solution for these" +#~ " specific challenges." #~ msgstr "" -#~ msgid "Starting the server" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " ``xgboost`` package. We use a simple" +#~ " example (`full code xgboost-quickstart " +#~ "`_) with two *clients* and " +#~ "one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" -#~ msgid "" -#~ "Now that we have a way to " -#~ "instantiate clients, we need to create" -#~ " our server in order to aggregate " -#~ "the results. Using Flower, this can " -#~ "be done very easily by first " -#~ "choosing a strategy (here, we are " -#~ "using :code:`FedAvg`, which will define " -#~ "the global weights as the average " -#~ "of all the clients' weights at " -#~ "each round) and then using the " -#~ ":code:`flwr.server.start_server` function:" +#~ msgid "" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a :doc:`virtualenv " +#~ "`." #~ msgstr "" #~ msgid "" -#~ "The :code:`weighted_average` function is there" -#~ " to provide a way to aggregate " -#~ "the metrics distributed amongst the " -#~ "clients (basically this allows us to " -#~ "display a nice average accuracy and " -#~ "loss for every round)." +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. Now" +#~ " that we have all our dependencies" +#~ " installed, let's run a simple " +#~ "distributed training with two clients " +#~ "and one server." 
#~ msgstr "" -#~ msgid "Putting everything together" +#~ msgid "" +#~ "In a file called ``client.py``, import" +#~ " xgboost, Flower, Flower Datasets and " +#~ "other related functions:" #~ msgstr "" -#~ msgid "We can now start client instances using:" +#~ msgid "Dataset partition and hyper-parameter selection" #~ msgstr "" #~ msgid "" -#~ "And they will be able to connect" -#~ " to the server and start the " -#~ "federated training." +#~ "Prior to local training, we require " +#~ "loading the HIGGS dataset from Flower" +#~ " Datasets and conduct data partitioning " +#~ "for FL:" #~ msgstr "" #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the `full code example " -#~ "`_ ." +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (``IidPartitioner(num_partitions=30)``). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ "``partition_id``:" #~ msgstr "" #~ msgid "" -#~ "Of course, this is a very basic" -#~ " example, and a lot can be " -#~ "added or modified, it was just to" -#~ " showcase how simply we could " -#~ "federate a Hugging Face workflow using" -#~ " Flower." +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for ``xgboost`` package." +#~ msgstr "" + +#~ msgid "Finally, we define the hyper-parameters used for XGBoost training." #~ msgstr "" #~ msgid "" -#~ "Note that in this example we used" -#~ " :code:`PyTorch`, but we could have " -#~ "very well used :code:`TensorFlow`." +#~ "The ``num_local_round`` represents the number" +#~ " of iterations for local tree boost." +#~ " We use CPU for the training in" +#~ " default. One can shift it to " +#~ "GPU by setting ``tree_method`` to " +#~ "``gpu_hist``. We use AUC as evaluation" +#~ " metric." 
+#~ msgstr "" + +#~ msgid "Flower client definition for XGBoost" #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with PyTorch Lightning to train an " -#~ "Auto Encoder model on MNIST." +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define ``XgbClient`` " +#~ "class inherited from ``fl.client.Client``." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using PyTorch Lightning " -#~ "and Flower!" +#~ "All required parameters defined above " +#~ "are passed to ``XgbClient``'s constructor." #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "Then, we override ``get_parameters``, ``fit``" +#~ " and ``evaluate`` methods insides " +#~ "``XgbClient`` class as follows." #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with TensorFlow to train a MobilNetV2" -#~ " model on CIFAR-10." +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use ``get_parameters`` and" +#~ " ``set_parameters`` to initialise model " +#~ "parameters for XGBoost. As a result, " +#~ "let's return an empty tensor in " +#~ "``get_parameters`` when it is called by" +#~ " the server at the first round." #~ msgstr "" -#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgid "" +#~ "In ``fit``, at the first round, we" +#~ " call ``xgb.train()`` to build up the" +#~ " first set of trees. 
From the " +#~ "second round, we load the global " +#~ "model sent from server to new " +#~ "build Booster object, and then update" +#~ " model weights on local training data" +#~ " with function ``local_boost`` as follows:" #~ msgstr "" -#~ msgid "Before Flower can be imported we have to install it:" +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class ``XgbClient`` and add one" +#~ " line to actually run this client:" #~ msgstr "" #~ msgid "" -#~ "Since we want to use the Keras " -#~ "API of TensorFlow (TF), we have to" -#~ " install TF as well:" +#~ "That's it for the client. We only" +#~ " have to implement ``Client`` and " +#~ "call ``fl.client.start_client()``. The string " +#~ "``\"[::]:8080\"`` tells the client which " +#~ "server to connect to. In our case" +#~ " we can run the server and the" +#~ " client on the same machine, " +#~ "therefore we use ``\"[::]:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we point the client at." #~ msgstr "" -#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgid "" +#~ "These updates are then sent to the" +#~ " *server* which will aggregate them " +#~ "to produce a better model. Finally, " +#~ "the *server* sends this improved version" +#~ " of the model back to each " +#~ "*client* to finish a complete FL " +#~ "round." #~ msgstr "" #~ msgid "" -#~ "We use the Keras utilities of TF" -#~ " to load CIFAR10, a popular colored" -#~ " image classification dataset for machine" -#~ " learning. The call to " -#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " -#~ "CIFAR10, caches it locally, and then " -#~ "returns the entire training and test " -#~ "set as NumPy ndarrays." +#~ "In a file named ``server.py``, import" +#~ " Flower and FedXgbBagging from " +#~ "``flwr.server.strategy``." 
+#~ msgstr "" + +#~ msgid "We first define a strategy for XGBoost bagging aggregation." #~ msgstr "" #~ msgid "" -#~ "Next, we need a model. For the " -#~ "purpose of this tutorial, we use " -#~ "MobilNetV2 with 10 output classes:" +#~ "We use two clients for this " +#~ "example. An ``evaluate_metrics_aggregation`` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The ``config_func`` function is " +#~ "to return the current FL round " +#~ "number to client's ``fit()`` and " +#~ "``evaluate()`` methods." +#~ msgstr "" + +#~ msgid "Then, we start the server:" +#~ msgstr "" + +#~ msgid "Tree-based bagging aggregation" #~ msgstr "" #~ msgid "" -#~ "The Flower server interacts with clients" -#~ " through an interface called " -#~ ":code:`Client`. When the server selects " -#~ "a particular client for training, it " -#~ "sends training instructions over the " -#~ "network. The client receives those " -#~ "instructions and calls one of the " -#~ ":code:`Client` methods to run your code" -#~ " (i.e., to train the neural network" -#~ " we defined earlier)." +#~ "After traversal of all clients' models," +#~ " a new global model is generated, " +#~ "followed by the serialisation, and " +#~ "sending back to each client." #~ msgstr "" #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses Keras." -#~ " The :code:`NumPyClient` interface defines " -#~ "three methods which can be implemented" -#~ " in the following way:" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in ``metrics_distributed``. One can" +#~ " see that the average AUC increases" +#~ " over FL rounds." 
#~ msgstr "" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`CifarClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in ``examples/xgboost-quickstart``." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. If you implement" -#~ " a client of type :code:`NumPyClient` " -#~ "you'll need to first call its " -#~ ":code:`to_client()` method. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support :doc:`Flower simulation" +#~ " ` making it" +#~ " easy to simulate large client " +#~ "cohorts in a resource-aware manner. " +#~ "Let's take a look!" #~ msgstr "" -#~ msgid "Each client will have its own dataset." 
+#~ msgid "Cyclic training" #~ msgstr "" #~ msgid "" -#~ "You should now see how the " -#~ "training does in the very first " -#~ "terminal (the one that started the " -#~ "server):" +#~ "To do this, we first customise a" +#~ " ``ClientManager`` in ``server_utils.py``:" #~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this can be " -#~ "found in :code:`examples/quickstart-" -#~ "tensorflow/client.py`." +#~ "The customised ``ClientManager`` samples all" +#~ " available clients in each FL round" +#~ " based on the order of connection " +#~ "to the server. Then, we define a" +#~ " new strategy ``FedXgbCyclic`` in " +#~ "``flwr.server.strategy.fedxgb_cyclic.py``, in order " +#~ "to sequentially select only one client" +#~ " in given round and pass the " +#~ "received model to next client." #~ msgstr "" -#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgid "Customised data partitioning" #~ msgstr "" -#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgid "" +#~ "In ``dataset.py``, we have a function" +#~ " ``instantiate_partitioner`` to instantiate the" +#~ " data partitioner based on the given" +#~ " ``num_partitions`` and ``partitioner_type``. " +#~ "Currently, we provide four supported " +#~ "partitioner type to simulate the " +#~ "uniformity/non-uniformity in data quantity " +#~ "(uniform, linear, square, exponential)." #~ msgstr "" -#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgid "Customised centralised/distributed evaluation" #~ msgstr "" -#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in ``server_utils.py``:" #~ msgstr "" -#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a ``Booster`` " +#~ "object and loads the global model " +#~ "weights to it. 
The evaluation is " +#~ "conducted by calling ``eval_set()`` method," +#~ " and the tested AUC value is " +#~ "reported." #~ msgstr "" -#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ "``evaluate()`` method insides the " +#~ "``XgbClient`` class in ``client_utils.py``." #~ msgstr "" -#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgid "Flower simulation" #~ msgstr "" -#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgid "" +#~ "We also provide an example code " +#~ "(``sim.py``) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a ``main()`` function to " +#~ "perform the simulation process:" #~ msgstr "" -#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ "``list``. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." #~ msgstr "" -#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgid "Then, we define the strategies and other hyper-parameters:" #~ msgstr "" -#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling ``fl.simulation.start_simulation``:" #~ msgstr "" -#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgid "" +#~ "One of key parameters for " +#~ "``start_simulation`` is ``client_fn`` which " +#~ "returns a function to construct a " +#~ "client. 
We define it as follows:" #~ msgstr "" -#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgid "Arguments parser" #~ msgstr "" -#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgid "" +#~ "In ``utils.py``, we define the arguments" +#~ " parsers for clients, server and " +#~ "simulation, allowing users to specify " +#~ "different experimental settings. Let's first" +#~ " see the sever side:" #~ msgstr "" -#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ "``--centralised-eval``, the sever will do" +#~ " centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." #~ msgstr "" -#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgid "Then, the argument parser on client side:" #~ msgstr "" -#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " ``--centralised-eval``, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting ``--scaled-lr``." #~ msgstr "" -#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgid "We also have an argument parser for simulation:" #~ msgstr "" -#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgid "This integrates all arguments for both client and server sides." 
#~ msgstr "" -#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgid "Example commands" #~ msgstr "" -#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgid "" +#~ "To run a centralised evaluated " +#~ "experiment with bagging strategy on 5" +#~ " clients with exponential distribution for" +#~ " 50 rounds, we first start the " +#~ "server as below:" #~ msgstr "" -#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgid "Then, on each client terminal, we start the clients:" #~ msgstr "" -#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgid "To run the same experiment with Flower simulation:" #~ msgstr "" -#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgid "" +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in ``examples" +#~ "/xgboost-comprehensive``." #~ msgstr "" -#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgid "|ac0a9766e26044d6aea222a829859b20|" #~ msgstr "" -#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgid "|36cd6e248b1443ce8a82b5a025bba368|" #~ msgstr "" -#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" #~ msgstr "" -#~ msgid "" -#~ "Some quickstart examples may have " -#~ "limitations or requirements that prevent " -#~ "them from running on every environment." -#~ " For more information, please see " -#~ "`Limitations`_." +#~ msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" #~ msgstr "" -#~ msgid "" -#~ "Change the application code. For " -#~ "example, change the ``seed`` in " -#~ "``quickstart_docker/task.py`` to ``43`` and " -#~ "save it:" +#~ msgid "|7605632e1b0f49599ffacf841491fcfb|" #~ msgstr "" -#~ msgid ":code:`fit`" -#~ msgstr ":code:`fit`" +#~ msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +#~ msgstr "" -#~ msgid "" -#~ "Note that since version :code:`1.11.0`, " -#~ ":code:`flower-server-app` no longer " -#~ "supports passing a reference to a " -#~ "`ServerApp` attribute. 
Instead, you need " -#~ "to pass the path to Flower app " -#~ "via the argument :code:`--app`. This is" -#~ " the path to a directory containing" -#~ " a `pyproject.toml`. You can create a" -#~ " valid Flower app by executing " -#~ ":code:`flwr new` and following the " -#~ "prompt." +#~ msgid "|5405ed430e4746e28b083b146fb71731|" #~ msgstr "" -#~ msgid "" -#~ "The following examples are available as" -#~ " standalone projects. Quickstart TensorFlow/Keras" -#~ " ---------------------------" +#~ msgid "|a389e87dab394eb48a8949aa2397687b|" #~ msgstr "" -#~ msgid "" -#~ "Let's create a new application project" -#~ " in Xcode and add :code:`flwr` as " -#~ "a dependency in your project. For " -#~ "our application, we will store the " -#~ "logic of our app in " -#~ ":code:`FLiOSModel.swift` and the UI elements" -#~ " in :code:`ContentView.swift`. We will " -#~ "focus more on :code:`FLiOSModel.swift` in " -#~ "this quickstart. Please refer to the " -#~ "`full code example " -#~ "`_ to " -#~ "learn more about the app." +#~ msgid "|89c412136a5146ec8dc32c0973729f12|" #~ msgstr "" -#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgid "|9503d3dc3a144e8aa295f8800cd8a766|" #~ msgstr "" -#~ msgid "" -#~ "Then add the mlmodel to the " -#~ "project simply by drag-and-drop, " -#~ "the mlmodel will be bundled inside " -#~ "the application during deployment to " -#~ "your iOS device. We need to pass" -#~ " the url to access mlmodel and " -#~ "run CoreML machine learning processes, " -#~ "it can be retrieved by calling the" -#~ " function :code:`Bundle.main.url`. For the " -#~ "MNIST dataset, we need to preprocess " -#~ "it into :code:`MLBatchProvider` object. The" -#~ " preprocessing is done inside " -#~ ":code:`DataLoader.swift`." 
+#~ msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" #~ msgstr "" -#~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this information beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ msgid "|a7579ad7734347508e959d9e14f2f53d|" #~ msgstr "" -#~ msgid "" -#~ "Then start the Flower gRPC client " -#~ "and start communicating to the server" -#~ " by passing our Flower client to " -#~ "the function :code:`startFlwrGRPC`." +#~ msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +#~ msgstr "" + +#~ msgid "|55472eef61274ba1b739408607e109df|" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ "call the provided :code:`MLFlwrClient` and " -#~ "call :code:`startFlwrGRPC()`. The attribute " -#~ ":code:`hostname` and :code:`port` tells the" -#~ " client which server to connect to." -#~ " This can be done by entering " -#~ "the hostname and port in the " -#~ "application before clicking the start " -#~ "button to start the federated learning" -#~ " process." +#~ "When operating in a production " +#~ "environment, it is strongly recommended " +#~ "to enable Transport Layer Security (TLS)" +#~ " for each Flower Component to ensure" +#~ " secure communication." #~ msgstr "" #~ msgid "" -#~ "For simple workloads we can start " -#~ "a Flower server and leave all the" -#~ " configuration possibilities at their " -#~ "default values. 
In a file named " -#~ ":code:`server.py`, import Flower and start " -#~ "the server:" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container:" #~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system in your ios device. The " -#~ "full `source code " -#~ "`_ for" -#~ " this example can be found in " -#~ ":code:`examples/ios`." +#~ "``--volume ./certificates/:/app/certificates/:ro``: Mount" +#~ " the ``certificates`` directory in" #~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Logistic " -#~ "Regression` model on MNIST using Flower" -#~ " and scikit-learn." +#~ "the current working directory of the " +#~ "host machine as a read-only volume" +#~ " at the" +#~ msgstr "" + +#~ msgid "``/app/certificates`` directory inside the container." #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. However, before" -#~ " setting up the client and server," -#~ " we will define all functionalities " -#~ "that we need for our federated " -#~ "learning setup within :code:`utils.py`. 
The" -#~ " :code:`utils.py` contains different functions" -#~ " defining all the machine learning " -#~ "basics:" +#~ "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the " +#~ "``ca.crt`` file from the" #~ msgstr "" -#~ msgid ":code:`get_model_parameters()`" +#~ msgid "" +#~ "current working directory of the host" +#~ " machine as a read-only volume " +#~ "at the ``/app/ca.crt``" #~ msgstr "" -#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgid "SuperExec" #~ msgstr "" -#~ msgid ":code:`set_model_params()`" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory where" +#~ " the SuperExec will be executed from," +#~ " we can use the flag ``--volume`` " +#~ "to mount the local directory into " +#~ "the ``/app/certificates/`` directory of the" +#~ " container:" #~ msgstr "" -#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgid "" +#~ ":substitution-code:`flwr/superexec:|stable_flwr_version|`: " +#~ "The name of the image to be " +#~ "run and the specific" #~ msgstr "" -#~ msgid ":code:`set_initial_params()`" +#~ msgid "SuperExec." #~ msgstr "" #~ msgid "" -#~ "Please check out :code:`utils.py` `here " -#~ "`_ for more details. " -#~ "The pre-defined functions are used " -#~ "in the :code:`client.py` and imported. " -#~ "The :code:`client.py` also requires to " -#~ "import several packages such as Flower" -#~ " and scikit-learn:" +#~ "``--ssl-certfile certificates/server.pem``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" #~ msgid "" -#~ "Prior to local training, we need " -#~ "to load the MNIST dataset, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning, " -#~ "and partition the dataset for FL. " -#~ "This can be conveniently achieved using" -#~ " `Flower Datasets `_." 
-#~ " The :code:`FederatedDataset.load_partition()` method" -#~ " loads the partitioned training set " -#~ "for each partition ID defined in " -#~ "the :code:`--partition-id` argument." +#~ "The ``certificates/server.pem`` file is used" +#~ " to identify the SuperExec and to " +#~ "encrypt the" #~ msgstr "" #~ msgid "" -#~ "Next, the logistic regression model is" -#~ " defined and initialized with " -#~ ":code:`utils.set_initial_params()`." +#~ "``--ssl-keyfile certificates/server.key``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" #~ msgid "" -#~ "The Flower server interacts with clients" -#~ " through an interface called " -#~ ":code:`Client`. When the server selects " -#~ "a particular client for training, it " -#~ "sends training instructions over the " -#~ "network. The client receives those " -#~ "instructions and calls one of the " -#~ ":code:`Client` methods to run your code" -#~ " (i.e., to fit the logistic " -#~ "regression we defined earlier)." +#~ "``--executor-config root-" +#~ "certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify" +#~ " the" #~ msgstr "" #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses " -#~ "scikit-learn. Implementing :code:`NumPyClient` " -#~ "usually means defining the following " -#~ "methods (:code:`set_parameters` is optional " -#~ "though):" +#~ "location of the CA certificate file " +#~ "inside the container that the SuperExec" +#~ " executor" #~ msgstr "" -#~ msgid ":code:`set_parameters` (optional)" +#~ msgid "should use to verify the SuperLink's identity." #~ msgstr "" -#~ msgid "is directly imported with :code:`utils.set_model_params()`" +#~ msgid "" +#~ "In this mode, the ClientApp is " +#~ "executed as a subprocess within the " +#~ "SuperNode Docker container, rather than " +#~ "running in a separate container. 
This" +#~ " approach reduces the number of " +#~ "running containers, which can be " +#~ "beneficial for environments with limited " +#~ "resources. However, it also means that" +#~ " the ClientApp is no longer isolated" +#~ " from the SuperNode, which may " +#~ "introduce additional security concerns." #~ msgstr "" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MnistClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "Before running the ClientApp as a " +#~ "subprocess, ensure that the FAB " +#~ "dependencies have been installed in the" +#~ " SuperNode images. This can be done" +#~ " by extending the SuperNode image:" +#~ msgstr "" + +#~ msgid "Dockerfile.supernode" +#~ msgstr "Flower SuperNode" + +#~ msgid "Run the ClientApp as a Subprocess" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. If you implement" -#~ " a client of type :code:`NumPyClient` " -#~ "you'll need to first call its " -#~ ":code:`to_client()` method. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." 
+#~ "Start the SuperNode with the flag " +#~ "``--isolation subprocess``, which tells the" +#~ " SuperNode to execute the ClientApp " +#~ "as a subprocess:" #~ msgstr "" -#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgid "Run the example and follow the logs of the ServerApp:" #~ msgstr "" #~ msgid "" -#~ "The number of federated learning rounds" -#~ " is set in :code:`fit_round()` and " -#~ "the evaluation is defined in " -#~ ":code:`get_evaluate_fn()`. The evaluation function" -#~ " is called after each federated " -#~ "learning round and gives you information" -#~ " about loss and accuracy. Note that" -#~ " we also make use of Flower " -#~ "Datasets here to load the test " -#~ "split of the MNIST dataset for " -#~ "server-side evaluation." +#~ "That is all it takes! You can " +#~ "monitor the progress of the run " +#~ "through the logs of the SuperExec." #~ msgstr "" #~ msgid "" -#~ "The :code:`main` contains the server-" -#~ "side parameter initialization " -#~ ":code:`utils.set_initial_params()` as well as " -#~ "the aggregation strategy " -#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" -#~ " the default one, federated averaging " -#~ "(or FedAvg), with two clients and " -#~ "evaluation after each federated learning " -#~ "round. The server can be started " -#~ "with the command " -#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -#~ "strategy=strategy, " -#~ "config=fl.server.ServerConfig(num_rounds=3))`." +#~ "You will learn how to run the " +#~ "Flower client and server components on" +#~ " two separate machines, with Flower " +#~ "configured to use TLS encryption and " +#~ "persist SuperLink state across restarts. " +#~ "A server consists of a SuperLink " +#~ "and ``SuperExec``. For more details " +#~ "about the Flower architecture, refer to" +#~ " the :doc:`../explanation-flower-architecture`" +#~ " explainer page." #~ msgstr "" #~ msgid "" -#~ "Congratulations! 
You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/sklearn-logreg-" -#~ "mnist`." +#~ "First, set the environment variables " +#~ "``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the" +#~ " IP address from the remote machine." +#~ " For example, if the IP is " +#~ "``192.168.2.33``, execute:" #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a federated XGBoost model" -#~ " on HIGGS dataset using Flower and" -#~ " :code:`xgboost` package. We use a " -#~ "simple example (`full code xgboost-" -#~ "quickstart `_) with two *clients* " -#~ "and one *server* to demonstrate how " -#~ "federated XGBoost works, and then we " -#~ "dive into a more complex example " -#~ "(`full code xgboost-comprehensive " -#~ "`_) to run various experiments." +#~ "Log into the remote machine using " +#~ "``ssh`` and run the following command" +#~ " to start the SuperLink and SuperExec" +#~ " services:" #~ msgstr "" #~ msgid "" -#~ "Since we want to use :code:`xgboost` " -#~ "package to build up XGBoost trees, " -#~ "let's go ahead and install " -#~ ":code:`xgboost`:" +#~ "Specify the remote SuperExec IP " +#~ "addresses and the path to the root" +#~ " certificate in the ``[tool.flwr.federations" +#~ ".remote-superexec]`` table in the " +#~ "``pyproject.toml`` file. 
Here, we have " +#~ "named our remote federation ``remote-" +#~ "superexec``:" +#~ msgstr "" + +#~ msgid "Run the project and follow the ServerApp logs:" #~ msgstr "" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import xgboost, Flower, Flower Datasets " -#~ "and other related functions:" +#~ "``-p 9091:9091 -p 9092:9092``: Map port" +#~ " ``9091`` and ``9092`` of the " +#~ "container to the same port of" +#~ msgstr "" + +#~ msgid "the host machine, allowing other services to access the Driver API on" #~ msgstr "" #~ msgid "" -#~ "In this example, we split the " -#~ "dataset into 30 partitions with uniform" -#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." -#~ " Then, we load the partition for " -#~ "the given client based on " -#~ ":code:`partition_id`:" +#~ "``http://localhost:9091`` and the Fleet API" +#~ " on ``http://localhost:9092``." +#~ msgstr "" + +#~ msgid "Step 3: Start the SuperNode" #~ msgstr "" #~ msgid "" -#~ "After that, we do train/test splitting" -#~ " on the given partition (client's " -#~ "local data), and transform data format" -#~ " for :code:`xgboost` package." +#~ "``flwr/supernode:|stable_flwr_version|``: This is " +#~ "the name of the image to be " +#~ "run and the specific tag" #~ msgstr "" #~ msgid "" -#~ "The functions of :code:`train_test_split` and" -#~ " :code:`transform_dataset_to_dmatrix` are defined " -#~ "as below:" +#~ "``--supernode-address 0.0.0.0:9094``: Set the" +#~ " address and port number that the " +#~ "SuperNode" +#~ msgstr "" + +#~ msgid "is listening on." +#~ msgstr "" + +#~ msgid "Step 4: Start the ClientApp" #~ msgstr "" #~ msgid "" -#~ "The :code:`num_local_round` represents the " -#~ "number of iterations for local tree " -#~ "boost. We use CPU for the training" -#~ " in default. One can shift it " -#~ "to GPU by setting :code:`tree_method` to" -#~ " :code:`gpu_hist`. We use AUC as " -#~ "evaluation metric." 
+#~ "The ClientApp Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own ClientApp image. In" +#~ " order to install the FAB " +#~ "dependencies, you will need to create" +#~ " a Dockerfile that extends the " +#~ "ClientApp image and installs the " +#~ "required dependencies." #~ msgstr "" #~ msgid "" -#~ "After loading the dataset we define " -#~ "the Flower client. We follow the " -#~ "general rule to define :code:`XgbClient` " -#~ "class inherited from :code:`fl.client.Client`." +#~ "Create a ClientApp Dockerfile called " +#~ "``Dockerfile.clientapp`` and paste the " +#~ "following code into it:" #~ msgstr "" +#~ msgid "Dockerfile.clientapp" +#~ msgstr "flower 클라이언트 앱" + #~ msgid "" -#~ "All required parameters defined above " -#~ "are passed to :code:`XgbClient`'s constructor." +#~ "to be built from is the " +#~ "``flwr/clientapp image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" #~ msgid "" -#~ "Then, we override :code:`get_parameters`, " -#~ ":code:`fit` and :code:`evaluate` methods " -#~ "insides :code:`XgbClient` class as follows." +#~ "``--supernode supernode-1:9094``: Connect to " +#~ "the SuperNode's Fleet API at the " +#~ "address" #~ msgstr "" -#~ msgid "" -#~ "Unlike neural network training, XGBoost " -#~ "trees are not started from a " -#~ "specified random weights. In this case," -#~ " we do not use :code:`get_parameters` " -#~ "and :code:`set_parameters` to initialise model" -#~ " parameters for XGBoost. As a result," -#~ " let's return an empty tensor in " -#~ ":code:`get_parameters` when it is called " -#~ "by the server at the first round." +#~ msgid "``supernode-1:9094``." #~ msgstr "" -#~ msgid "" -#~ "In :code:`fit`, at the first round, " -#~ "we call :code:`xgb.train()` to build up" -#~ " the first set of trees. 
From " -#~ "the second round, we load the " -#~ "global model sent from server to " -#~ "new build Booster object, and then " -#~ "update model weights on local training" -#~ " data with function :code:`local_boost` as" -#~ " follows:" +#~ msgid "Step 5: Start the SuperExec" #~ msgstr "" #~ msgid "" -#~ "Given :code:`num_local_round`, we update trees" -#~ " by calling :code:`bst_input.update` method. " -#~ "After training, the last " -#~ ":code:`N=num_local_round` trees will be " -#~ "extracted to send to the server." +#~ "Similar to the ClientApp image, you " +#~ "will need to create a Dockerfile " +#~ "that extends the SuperExec image and " +#~ "installs the required FAB dependencies." #~ msgstr "" #~ msgid "" -#~ "In :code:`evaluate`, after loading the " -#~ "global model, we call :code:`bst.eval_set` " -#~ "function to conduct evaluation on valid" -#~ " set. The AUC value will be " -#~ "returned." +#~ "Create a SuperExec Dockerfile called " +#~ "``Dockerfile.superexec`` and paste the " +#~ "following code in:" #~ msgstr "" -#~ msgid "" -#~ "Now, we can create an instance of" -#~ " our class :code:`XgbClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ msgid "Dockerfile.superexec" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` and " -#~ "call :code:`fl.client.start_client()`. The string" -#~ " :code:`\"[::]:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." 
+#~ ":substitution-code:`FROM " +#~ "flwr/superexec:|stable_flwr_version|`: This line " +#~ "specifies that the Docker image" #~ msgstr "" #~ msgid "" -#~ "In a file named :code:`server.py`, " -#~ "import Flower and FedXgbBagging from " -#~ ":code:`flwr.server.strategy`." +#~ "to be built from is the " +#~ "``flwr/superexec image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" #~ msgid "" -#~ "We use two clients for this " -#~ "example. An :code:`evaluate_metrics_aggregation` " -#~ "function is defined to collect and " -#~ "wighted average the AUC values from " -#~ "clients. The :code:`config_func` function is" -#~ " to return the current FL round " -#~ "number to client's :code:`fit()` and " -#~ ":code:`evaluate()` methods." +#~ "``ENTRYPOINT [\"flower-superexec\"``: Set the" +#~ " command ``flower-superexec`` to be" #~ msgstr "" -#~ msgid "" -#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," -#~ " we define :code:`FedXgbBagging` inherited " -#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" -#~ " override the :code:`aggregate_fit`, " -#~ ":code:`aggregate_evaluate` and :code:`evaluate` " -#~ "methods as follows:" +#~ msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" #~ msgstr "" -#~ msgid "" -#~ "In :code:`aggregate_fit`, we sequentially " -#~ "aggregate the clients' XGBoost trees by" -#~ " calling :code:`aggregate()` function:" +#~ msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." #~ msgstr "" #~ msgid "" -#~ "In this function, we first fetch " -#~ "the number of trees and the number" -#~ " of parallel trees for the current" -#~ " and previous model by calling " -#~ ":code:`_get_tree_nums`. Then, the fetched " -#~ "information will be aggregated. After " -#~ "that, the trees (containing model " -#~ "weights) are aggregated to generate a" -#~ " new tree model." 
+#~ "Afterward, in the directory that holds" +#~ " the Dockerfile, execute this Docker " +#~ "command to build the SuperExec image:" #~ msgstr "" -#~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated XGBoost " -#~ "system. The AUC values can be " -#~ "checked in :code:`metrics_distributed`. One " -#~ "can see that the average AUC " -#~ "increases over FL rounds." +#~ msgid "Start the SuperExec container:" #~ msgstr "" #~ msgid "" -#~ "The full `source code " -#~ "`_ for this example can be" -#~ " found in :code:`examples/xgboost-quickstart`." +#~ "``-p 9093:9093``: Map port ``9093`` of" +#~ " the container to the same port " +#~ "of" #~ msgstr "" #~ msgid "" -#~ "To do this, we first customise a" -#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ "the host machine, allowing you to " +#~ "access the SuperExec API on " +#~ "``http://localhost:9093``." #~ msgstr "" -#~ msgid "" -#~ "The customised :code:`ClientManager` samples " -#~ "all available clients in each FL " -#~ "round based on the order of " -#~ "connection to the server. Then, we " -#~ "define a new strategy :code:`FedXgbCyclic` " -#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " -#~ "order to sequentially select only one" -#~ " client in given round and pass " -#~ "the received model to next client." +#~ msgid "``--name superexec``: Assign the name ``superexec`` to the container." #~ msgstr "" #~ msgid "" -#~ "Unlike the original :code:`FedAvg`, we " -#~ "don't perform aggregation here. Instead, " -#~ "we just make a copy of the " -#~ "received client model as global model" -#~ " by overriding :code:`aggregate_fit`." 
+#~ "``--executor-config superlink=\\\"superlink:9091\\\"``:" +#~ " Configure the SuperExec executor to" #~ msgstr "" -#~ msgid "" -#~ "Also, the customised :code:`configure_fit` and" -#~ " :code:`configure_evaluate` methods ensure the" -#~ " clients to be sequentially selected " -#~ "given FL round:" +#~ msgid "connect to the SuperLink running on port ``9091``." #~ msgstr "" -#~ msgid "" -#~ "In :code:`dataset.py`, we have a " -#~ "function :code:`instantiate_partitioner` to " -#~ "instantiate the data partitioner based " -#~ "on the given :code:`num_partitions` and " -#~ ":code:`partitioner_type`. Currently, we provide " -#~ "four supported partitioner type to " -#~ "simulate the uniformity/non-uniformity in " -#~ "data quantity (uniform, linear, square, " -#~ "exponential)." +#~ msgid "Launch two new ClientApp containers based on the newly built image:" #~ msgstr "" #~ msgid "" -#~ "To facilitate centralised evaluation, we " -#~ "define a function in :code:`server_utils.py`:" +#~ "Setting the ``PROJECT_DIR`` helps Docker " +#~ "Compose locate the ``pyproject.toml`` file," +#~ " allowing it to install dependencies " +#~ "in the SuperExec and SuperNode images" +#~ " correctly." #~ msgstr "" #~ msgid "" -#~ "This function returns a evaluation " -#~ "function which instantiates a :code:`Booster`" -#~ " object and loads the global model" -#~ " weights to it. The evaluation is " -#~ "conducted by calling :code:`eval_set()` " -#~ "method, and the tested AUC value " -#~ "is reported." +#~ "To ensure the ``flwr`` CLI connects " +#~ "to the SuperExec, you need to " +#~ "specify the SuperExec addresses in the" +#~ " ``pyproject.toml`` file." #~ msgstr "" #~ msgid "" -#~ "As for distributed evaluation on the " -#~ "clients, it's same as the quick-" -#~ "start example by overriding the " -#~ ":code:`evaluate()` method insides the " -#~ ":code:`XgbClient` class in :code:`client_utils.py`." 
+#~ "Run the quickstart example, monitor the" +#~ " ServerApp logs and wait for the " +#~ "summary to appear:" #~ msgstr "" -#~ msgid "" -#~ "We also provide an example code " -#~ "(:code:`sim.py`) to use the simulation " -#~ "capabilities of Flower to simulate " -#~ "federated XGBoost training on either a" -#~ " single machine or a cluster of " -#~ "machines." +#~ msgid "In the SuperExec logs, you should find the ``Get weights`` line:" #~ msgstr "" -#~ msgid "" -#~ "After importing all required packages, " -#~ "we define a :code:`main()` function to" -#~ " perform the simulation process:" +#~ msgid "Step 7: Add another SuperNode" #~ msgstr "" #~ msgid "" -#~ "We first load the dataset and " -#~ "perform data partitioning, and the " -#~ "pre-processed data is stored in a " -#~ ":code:`list`. After the simulation begins, " -#~ "the clients won't need to pre-" -#~ "process their partitions again." +#~ "You can add more SuperNodes and " +#~ "ClientApps by duplicating their definitions" +#~ " in the ``compose.yml`` file." #~ msgstr "" #~ msgid "" -#~ "After that, we start the simulation " -#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ "Just give each new SuperNode and " +#~ "ClientApp service a unique service name" +#~ " like ``supernode-3``, ``clientapp-3``, etc." #~ msgstr "" -#~ msgid "" -#~ "One of key parameters for " -#~ ":code:`start_simulation` is :code:`client_fn` which" -#~ " returns a function to construct a" -#~ " client. We define it as follows:" +#~ msgid "In ``compose.yml``, add the following:" #~ msgstr "" #~ msgid "" -#~ "In :code:`utils.py`, we define the " -#~ "arguments parsers for clients, server " -#~ "and simulation, allowing users to " -#~ "specify different experimental settings. Let's" -#~ " first see the sever side:" +#~ "If you also want to enable TLS " +#~ "for the new SuperNodes, duplicate the" +#~ " SuperNode definition for each new " +#~ "SuperNode service in the ``with-" +#~ "tls.yml`` file." 
#~ msgstr "" #~ msgid "" -#~ "This allows user to specify training " -#~ "strategies / the number of total " -#~ "clients / FL rounds / participating " -#~ "clients / clients for evaluation, and" -#~ " evaluation fashion. Note that with " -#~ ":code:`--centralised-eval`, the sever will " -#~ "do centralised evaluation and all " -#~ "functionalities for client evaluation will " -#~ "be disabled." +#~ "Make sure that the names of the" +#~ " services match with the one in " +#~ "the ``compose.yml`` file." +#~ msgstr "" + +#~ msgid "In ``with-tls.yml``, add the following:" +#~ msgstr "" + +#~ msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" #~ msgstr "" #~ msgid "" -#~ "This defines various options for client" -#~ " data partitioning. Besides, clients also" -#~ " have an option to conduct evaluation" -#~ " on centralised test set by setting" -#~ " :code:`--centralised-eval`, as well as " -#~ "an option to perform scaled learning " -#~ "rate based on the number of " -#~ "clients by setting :code:`--scaled-lr`." +#~ "This guide is for users who have" +#~ " already worked with Flower 0.x and" +#~ " want to upgrade to Flower 1.0. " +#~ "Newer versions of Flower (1.12+) are " +#~ "based on a new architecture (previously" +#~ " called Flower Next) and not covered" +#~ " in this guide. After upgrading " +#~ "Flower 0.x projects to Flower 1.0, " +#~ "please refer to :doc:`Upgrade to Flower" +#~ " Next ` to make your project compatible" +#~ " with the lastest version of Flower." +#~ msgstr "" + +#~ msgid "Upgrade to Flower Next" +#~ msgstr "Flower Next 업그레이드" + +#~ msgid "" +#~ "This guide shows how to reuse " +#~ "pre-``1.8`` Flower code with minimum " +#~ "code changes by using the *compatibility" +#~ " layer* in Flower Next. In another" +#~ " guide, we will show how to run" +#~ " Flower Next end-to-end with " +#~ "pure Flower Next APIs." +#~ msgstr "" +#~ "이 가이드에서는 Flower Next의 *호환성 레이어*를 " +#~ "사용하여 최소한의 코드 변경으로 ``1.8`` 이전의 " +#~ "Flower 코드를 재사용하는 방법을 보여줍니다. 
다른 " +#~ "가이드에서는 순수한 Flower Next API로 Flower " +#~ "Next를 end-to-end로 실행하는 방법을 " +#~ "보여드리겠습니다." + +#~ msgid "Using Poetry" +#~ msgstr "Poetry 사용" + +#~ msgid "" +#~ "Update the ``flwr`` dependency in " +#~ "``pyproject.toml`` and then reinstall (don't" +#~ " forget to delete ``poetry.lock`` via " +#~ "``rm poetry.lock`` before running ``poetry " +#~ "install``)." #~ msgstr "" +#~ "``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음" +#~ " 다시 설치하세요(``poetry install``을 실행하기 전에 " +#~ "``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는" +#~ " 것을 잊지 마세요)." #~ msgid "" -#~ "The full `code " -#~ "`_ for this comprehensive " -#~ "example can be found in :code:`examples" -#~ "/xgboost-comprehensive`." +#~ "Ensure you set the following version " +#~ "constraint in your ``pyproject.toml``:" +#~ msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#~ msgid "" +#~ "This function is deprecated since " +#~ "1.13.0. Use :code: `flwr run` to " +#~ "start a Flower simulation." #~ msgstr "" -#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgid "|c9344c3dfee24383908fabaac40a8504|" #~ msgstr "" -#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgid "|c10cd8f2177641bd8091c7b76d318ff9|" #~ msgstr "" -#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgid "|3c59c315e67945ea8b839381c5deb6c2|" #~ msgstr "" -#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgid "|eadf87e1e20549789512f7aa9199fcff|" #~ msgstr "" -#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgid "|66ce8f21aeb443fca1fc88f727458417|" #~ msgstr "" -#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgid "|f5768015a1014396b4761bb6cb3677f5|" #~ msgstr "" -#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgid "|a746aa3f56064617a4e00f4c6a0cb140|" #~ msgstr "" -#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgid "|cf8f676dd3534a44995c1b40910fd030|" #~ msgstr "" -#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgid "|d1c0e3a4c9dc4bfd88ee6f1fe626edaf|" #~ msgstr "" -#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgid 
"|1d8d6298a4014ec3a717135bcc7a94f9|" #~ msgstr "" -#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgid "|e3ea79200ff44d459358b9f4713e582b|" #~ msgstr "" -#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgid "|3e1061718a4a49d485764d30a4bfecdd|" #~ msgstr "" -#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgid "|7750e597d1ea4e319f7e0a40539bf214|" #~ msgstr "" -#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgid "|dd4434075f374e99ac07f509a883778f|" #~ msgstr "" -#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgid "Unreleased" +#~ msgstr "릴리즈 빌드" + +#~ msgid "Other changes" +#~ msgstr "**변경사항 스테이징**" + +#~ msgid "|cf5fe148406b44b9a8b842fb01b5a7ea|" #~ msgstr "" -#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgid "|ba25c91426d64cc1ae2d3febc5715b35|" #~ msgstr "" -#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgid "|fca67f83aaab4389aa9ebb4d9c5cd75e|" #~ msgstr "" -#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgid "|6f2e8f95c95443379b0df00ca9824654|" #~ msgstr "" -#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgid "|c0ab3a1a733d4dbc9e1677aa608e8038|" #~ msgstr "" -#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgid "|8f0491bde07341ab9f2e23d50593c0be|" #~ msgstr "" -#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgid "|762fc099899943688361562252c5e600|" #~ msgstr "" -#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgid "|f62d365fd0ae405b975d3ca01e7183fd|" #~ msgstr "" -#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgid "|2c78fc1816b143289f4d909388f92a80|" #~ msgstr "" -#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgid "|4230725aeebe497d8ad84a3efc2a912b|" #~ msgstr "" -#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgid "|64b66a88417240eabe52f5cc55d89d0b|" #~ msgstr "" -#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgid "|726c8eca58bc4f859b06aa24a587b253|" #~ msgstr "" -#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgid "|f9d869e4b33c4093b29cf24ed8dff80a|" #~ msgstr "" -#~ msgid 
"|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgid "|4ab50bc01a9f426a91a2c0cbc3ab7a84|" #~ msgstr "" -#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgid "Request for examples" +#~ msgstr "예시 요청" + +#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" +#~ msgstr "Llama 2 미세 조정, Hugging Face Transformer와 파이토치 포함" + +#~ msgid "Android ONNX on-device training" +#~ msgstr "Android ONNX 온디바이스 훈련" + +#~ msgid "|f150b8d6e0074250822c9f6f7a8de3e0|" #~ msgstr "" -#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgid "|72772d10debc4abd8373c0bc82985422|" #~ msgstr "" -#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgid "|5815398552ad41d290a3a2631fe8f6ca|" #~ msgstr "" -#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgid "|e6ac20744bf149378be20ac3dc309356|" #~ msgstr "" -#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgid "|a4011ef443c14725b15a8cf33b0e3443|" #~ msgstr "" -#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgid "|a22faa3617404c06803731525e1c609f|" #~ msgstr "" -#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgid "|84a5c9b5041c43c3beab9786197c3e4e|" #~ msgstr "" -#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgid "|b5c4be0b52d4493ba8c4af14d7c2db97|" #~ msgstr "" -#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgid "|c1c784183d18481186ff65dc261d1335|" #~ msgstr "" -#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgid "|669fcd1f44ab42f5bbd196c3cf1ecbc2|" #~ msgstr "" -#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgid "|edfb08758c9441afb6736045a59e154c|" #~ msgstr "" -#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgid "|82338b8bbad24d5ea9df3801aab37852|" #~ msgstr "" -#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgid "|518d994dd2c844898b441da03b858326|" #~ msgstr "" -#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgid "|7bfcfcb57ae5403f8e18486f45ca48b4|" #~ msgstr "" diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index 
393c04bb0b13..21eafd64d9a2 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-10-10 00:29+0000\n" +"POT-Creation-Date: 2024-11-30 00:31+0000\n" "PO-Revision-Date: 2024-05-25 11:09+0000\n" "Last-Translator: Gustavo Bertoli \n" "Language: pt_BR\n" @@ -949,9 +949,9 @@ msgstr "" #: ../../source/contributor-how-to-release-flower.rst:13 msgid "" -"Run ``python3 src/py/flwr_tool/update_changelog.py `` in " -"order to add every new change to the changelog (feel free to make manual " -"changes to the changelog afterwards until it looks good)." +"Run ``python3 ./dev/update_changelog.py `` in order to add" +" every new change to the changelog (feel free to make manual changes to " +"the changelog afterwards until it looks good)." msgstr "" #: ../../source/contributor-how-to-release-flower.rst:16 @@ -1274,9 +1274,8 @@ msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" -"Until the Flower core library matures it will be easier to get PR's " -"accepted if they only touch non-core areas of the codebase. Good " -"candidates to get started are:" +"In general, it is easier to get PR's accepted if they only touch non-core" +" areas of the codebase. Good candidates to get started are:" msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:14 @@ -1284,99 +1283,118 @@ msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:15 -msgid "Baselines: See below." +#, python-format +msgid "" +"Open issues: Issues with the tag `good first issue " +"`_." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:16 -msgid "Examples: See below." +#: ../../source/contributor-ref-good-first-contributions.rst:17 +msgid "Baselines: See below." 
msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:19 -msgid "Request for Flower Baselines" +#: ../../source/contributor-ref-good-first-contributions.rst:18 +msgid "Examples: See below." msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:21 +#, fuzzy +msgid "Flower Baselines" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-ref-good-first-contributions.rst:23 msgid "" -"If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines " -"`_." +"If you are not familiar with Flower Baselines, please check our " +"`contributing guide for baselines `_." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:26 msgid "" -"You should then check out the open `issues " +"Then take a look at the open `issues " "`_" -" for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignees, feel free to assign it to yourself and start " -"working on it!" +" for baseline requests. If you find a baseline that you'd like to work " +"on, and it has no assignees, feel free to assign it to yourself and get " +"started!" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:30 +#: ../../source/contributor-ref-good-first-contributions.rst:31 msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to" -" open a new issue with the baseline request template!" +"If you don't find the baseline you'd like to work on, be sure to open a " +"new issue with the baseline request template!" 
msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:34 -msgid "Request for examples" -msgstr "" +#: ../../source/contributor-ref-good-first-contributions.rst:35 +#, fuzzy +msgid "Usage examples" +msgstr "Exemplo" -#: ../../source/contributor-ref-good-first-contributions.rst:36 +#: ../../source/contributor-ref-good-first-contributions.rst:37 msgid "" -"We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are" -" a few ideas where we'd be happy to accept a PR:" -msgstr "" - -#: ../../source/contributor-ref-good-first-contributions.rst:40 -msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -msgstr "" - -#: ../../source/contributor-ref-good-first-contributions.rst:41 -msgid "XGBoost" -msgstr "" - -#: ../../source/contributor-ref-good-first-contributions.rst:42 -msgid "Android ONNX on-device training" +"We wish we had more time to write usage examples because they help users " +"to get started with building what they want. If you notice any missing " +"examples that could help others, feel free to contribute!" msgstr "" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 msgid "Secure Aggregation Protocols" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:6 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " -"protocol has not been implemented yet, so its diagram and abstraction may" -" not be accurate in practice. The SecAgg protocol can be considered as a " -"special case of the SecAgg+ protocol." +"While this term might be used in other places, here it refers to a series" +" of protocols, including ``SecAgg``, ``SecAgg+``, ``LightSecAgg``, " +"``FastSecAgg``, etc. This concept was first proposed by Bonawitz et al. " +"in `Practical Secure Aggregation for Federated Learning on User-Held Data" +" `_." 
msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 -msgid "The ``SecAgg+`` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +msgid "" +"Secure Aggregation protocols are used to securely aggregate model updates" +" from multiple clients while keeping the updates private. This is done by" +" encrypting the model updates before sending them to the server. The " +"server can decrypt only the aggregated model update without being able to" +" inspect individual updates." msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:16 msgid "" -"In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have" -" keys of int type rather than ClientProxy type." +"Flower now provides the ``SecAgg`` and ``SecAgg+`` protocols. While we " +"plan to implement more protocols in the future, one may also implement " +"their own custom secure aggregation protocol via low-level APIs." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:21 +msgid "The ``SecAgg+`` protocol in Flower" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:23 msgid "" -"The Flower server will execute and process received results in the " -"following order:" +"The ``SecAgg+`` protocol is implemented using the ``SecAggPlusWorkflow`` " +"in the ``ServerApp`` and the ``secaggplus_mod`` in the ``ClientApp``. The" +" ``SecAgg`` protocol is a special case of the ``SecAgg+`` protocol, and " +"one may use ``SecAggWorkflow`` and ``secagg_mod`` for that." 
msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 -msgid "The ``LightSecAgg`` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:28 +msgid "" +"You may find a detailed example in the `Secure Aggregation Example " +"`_. The " +"documentation for the ``SecAgg+`` protocol configuration is available at " +"`SecAggPlusWorkflow `_." msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 -msgid "Types" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:33 +msgid "" +"The logic of the ``SecAgg+`` protocol is illustrated in the following " +"sequence diagram: the dashed lines represent communication over the " +"network, and the solid lines represent communication within the same " +"process. The ``ServerApp`` is connected to ``SuperLink``, and the " +"``ClientApp`` is connected to the ``SuperNode``; thus, the communication " +"between the ``ServerApp`` and the ``ClientApp`` is done via the " +"``SuperLink`` and the ``SuperNode``." 
msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:2 @@ -1952,7 +1970,6 @@ msgid "" msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:357 -#: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "" @@ -2031,7 +2048,6 @@ msgid "Get started as a contributor" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/docker/run-as-subprocess.rst:11 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 @@ -2267,17 +2283,11 @@ msgstr "" #: ../../source/docker/enable-tls.rst:4 msgid "" "When operating in a production environment, it is strongly recommended to" -" enable Transport Layer Security (TLS) for each Flower Component to " +" enable Transport Layer Security (TLS) for each Flower component to " "ensure secure communication." msgstr "" -#: ../../source/docker/enable-tls.rst:7 -msgid "" -"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "" - -#: ../../source/docker/enable-tls.rst:12 +#: ../../source/docker/enable-tls.rst:9 msgid "" "For testing purposes, you can generate your own self-signed certificates." " The `Enable SSL connections ``: The name of your SuperLink image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2438,18 +2463,11 @@ msgstr "" msgid "the network." msgstr "" -#: ../../source/docker/enable-tls.rst:72 -msgid "SuperNode" -msgstr "" - -#: ../../source/docker/enable-tls.rst:74 -msgid "" -"Assuming that the ``ca.crt`` certificate already exists locally, we can " -"use the flag ``--volume`` to mount the local certificate into the " -"container's ``/app/`` directory." 
+#: ../../source/docker/enable-tls.rst:79 +msgid "**SuperNode**" msgstr "" -#: ../../source/docker/enable-tls.rst:79 +#: ../../source/docker/enable-tls.rst:83 ../../source/docker/enable-tls.rst:189 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2457,23 +2475,23 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt``" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"current working directory of the host machine as a read-only volume at " -"the ``/app/ca.crt``" +"file from the ``superlink-certificates`` directory of the host machine as" +" a read-only" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "directory inside the container." +msgid "volume at the ``/app/ca.crt`` directory inside the container." msgstr "" -#: ../../source/docker/enable-tls.rst -msgid "" -":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " -"the image to be run and the specific" +#: ../../source/docker/enable-tls.rst:101 +msgid "````: The name of your SuperNode image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2486,60 +2504,188 @@ msgstr "" msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -#: ../../source/docker/enable-tls.rst:105 -msgid "SuperExec" +#: ../../source/docker/enable-tls.rst +msgid "Isolation Mode ``process``" +msgstr "" + +#: ../../source/docker/enable-tls.rst:109 +msgid "" +"In isolation mode ``process``, the ServerApp and ClientApp run in their " +"own processes. Unlike in isolation mode ``subprocess``, the SuperLink or " +"SuperNode does not attempt to create the respective processes; instead, " +"they must be created externally." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:113 +msgid "" +"It is possible to run only the SuperLink in isolation mode ``subprocess``" +" and the SuperNode in isolation mode ``process``, or vice versa, or even " +"both with isolation mode ``process``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:117 +msgid "**SuperLink and ServerApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:122 +msgid "" +"Assuming all files we need are in the local ``superlink-certificates`` " +"directory, we can use the flag ``--volume`` to mount the local directory " +"into the SuperLink container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory in the current working directory of " +"the host" msgstr "" -#: ../../source/docker/enable-tls.rst:107 +#: ../../source/docker/enable-tls.rst msgid "" -"Assuming all files we need are in the local ``certificates`` directory " -"where the SuperExec will be executed from, we can use the flag " -"``--volume`` to mount the local directory into the ``/app/certificates/``" -" directory of the container:" +"machine as a read-only volume at the ``/app/certificates`` directory " +"inside the container." msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " "the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "SuperExec." +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." 
msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-certfile certificates/server.pem``: Specify the location of the " -"SuperExec's" +"``--isolation process``: Tells the SuperLink that the ServerApp is " +"created by separate" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "independent process. The SuperLink does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:168 +#: ../../source/docker/tutorial-quickstart-docker.rst:207 +msgid "Start the ServerApp container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" + +#: ../../source/docker/enable-tls.rst:181 +msgid "````: The name of your ServerApp image to be run." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"The ``certificates/server.pem`` file is used to identify the SuperExec " -"and to encrypt the" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-keyfile certificates/server.key``: Specify the location of the " -"SuperExec's" +"unencrypted communication. Secure connections will be added in future " +"releases." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:185 +msgid "**SuperNode and ClientApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:192 +msgid "Start the SuperNode container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt`` file from the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory of the host machine as a read-only " +"volume at the ``/app/ca.crt``" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "directory inside the container." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"``--executor-config root-" -"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"location of the CA certificate file inside the container that the " -"SuperExec executor" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "should use to verify the SuperLink's identity." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:220 +msgid "Start the ClientApp container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst:233 +msgid "````: The name of your ClientApp image to be run." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:237 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:54 +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "" + +#: ../../source/docker/enable-tls.rst:239 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker.rst:330 +msgid "pyproject.toml" +msgstr "" + +#: ../../source/docker/enable-tls.rst:246 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "" +"The path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/enable-tls.rst:251 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/enable-tls.rst:254 +msgid "" +"In this example, ``local-deployment-tls`` has been used. Just remember to" +" replace ``local-deployment-tls`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" #: ../../source/docker/index.rst:2 @@ -2600,6 +2746,13 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." 
+msgstr "" + #: ../../source/docker/persist-superlink-state.rst:21 msgid "" "In the example below, we create a new directory called ``state``, change " @@ -2697,44 +2850,126 @@ msgid "SuperNode Dockerfile" msgstr "Construindo a imagem do servidor" #: ../../source/docker/run-as-subprocess.rst:2 -msgid "Run ClientApp as a Subprocess" +msgid "Run ServerApp or ClientApp as a Subprocess" msgstr "" #: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"In this mode, the ClientApp is executed as a subprocess within the " -"SuperNode Docker container, rather than running in a separate container. " -"This approach reduces the number of running containers, which can be " -"beneficial for environments with limited resources. However, it also " -"means that the ClientApp is no longer isolated from the SuperNode, which " -"may introduce additional security concerns." +"The SuperLink and SuperNode components support two distinct isolation " +"modes, allowing for flexible deployment and control:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:13 +#: ../../source/docker/run-as-subprocess.rst:7 msgid "" -"Before running the ClientApp as a subprocess, ensure that the FAB " -"dependencies have been installed in the SuperNode images. This can be " -"done by extending the SuperNode image:" +"Subprocess Mode: In this configuration (default), the SuperLink and " +"SuperNode take responsibility for launching the ServerApp and ClientApp " +"processes internally. This differs from the ``process`` isolation-mode " +"which uses separate containers, as demonstrated in the :doc:`tutorial-" +"quickstart-docker` guide." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:12 +msgid "" +"Using the ``subprocess`` approach reduces the number of running " +"containers, which can be beneficial for environments with limited " +"resources. 
However, it also means that the applications are not isolated " +"from their parent containers, which may introduce additional security " +"concerns." msgstr "" #: ../../source/docker/run-as-subprocess.rst:17 -msgid "Dockerfile.supernode" +msgid "" +"Process Mode: In this mode, the ServerApp and ClientApps run in " +"completely separate processes. Unlike the alternative Subprocess mode, " +"the SuperLink or SuperNode does not attempt to create or manage these " +"processes. Instead, they must be started externally." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:22 +msgid "" +"Both modes can be mixed for added flexibility. For instance, you can run " +"the SuperLink in ``subprocess`` mode while keeping the SuperNode in " +"``process`` mode, or vice versa." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:25 +msgid "" +"To run the SuperLink and SuperNode in isolation mode ``process``, refer " +"to the :doc:`tutorial-quickstart-docker` guide. To run them in " +"``subprocess`` mode, follow the instructions below." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:33 +#: ../../source/docker/run-as-subprocess.rst:74 +msgid "**Prerequisites**" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:35 +msgid "" +"1. Before running the ServerApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperLink images. This can be " +"done by extending the SuperLink image:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:38 +#, fuzzy +msgid "superlink.Dockerfile" +msgstr "Construindo a imagem do servidor" + +#: ../../source/docker/run-as-subprocess.rst:52 +msgid "" +"2. 
Next, build the SuperLink Docker image by running the following " +"command in the directory where Dockerfile is located:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:59 +msgid "**Run the ServerApp as a Subprocess**" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:61 +msgid "" +"Start the SuperLink and run the ServerApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:76 +msgid "" +"1. Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:31 +#: ../../source/docker/run-as-subprocess.rst:80 +#, fuzzy +msgid "supernode.Dockerfile" +msgstr "Construindo a imagem do servidor" + +#: ../../source/docker/run-as-subprocess.rst:94 msgid "" -"Next, build the SuperNode Docker image by running the following command " -"in the directory where Dockerfile is located:" +"2. 
Next, build the SuperNode Docker image by running the following " +"command in the directory where Dockerfile is located:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:39 -msgid "Run the ClientApp as a Subprocess" +#: ../../source/docker/run-as-subprocess.rst:101 +msgid "**Run the ClientApp as a Subprocess**" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:41 +#: ../../source/docker/run-as-subprocess.rst:103 msgid "" -"Start the SuperNode with the flag ``--isolation subprocess``, which tells" -" the SuperNode to execute the ClientApp as a subprocess:" +"Start the SuperNode and run the ClientApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 @@ -2780,7 +3015,9 @@ msgstr "Verifique que o serviço Docker está rodando." #: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 -msgid "Docker Compose is `installed `_." +msgid "" +"Docker Compose V2 is `installed " +"`_." 
msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 @@ -2800,29 +3037,13 @@ msgid "" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 -msgid "Build and start the services using the following command:" -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 -msgid "" -"Append the following lines to the end of the ``pyproject.toml`` file and " -"save it:" -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 -#: ../../source/docker/tutorial-quickstart-docker.rst:324 -msgid "pyproject.toml" -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:45 msgid "" -"You can customize the string that follows ``tool.flwr.federations.`` to " -"fit your needs. However, please note that the string cannot contain a dot" -" (``.``)." +"Export the version of Flower that your environment uses. Then, build and " +"start the services using the following command:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -2830,129 +3051,121 @@ msgid "" "command." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 -#, fuzzy -msgid "Run the example:" -msgstr "Exemplo" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 -msgid "Follow the logs of the SuperExec service:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:72 +msgid "Run the example and follow the logs of the ``ServerApp`` :" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:78 msgid "" "That is all it takes! You can monitor the progress of the run through the" -" logs of the SuperExec." +" logs of the ``ServerApp``." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:91 msgid "After that, you can repeat the steps above." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:94 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 msgid "Limitations" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 msgid "Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 msgid "quickstart-fastai" msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/ref-changelog.md:236 ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:879 ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:1001 ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1132 msgid "None" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:103 msgid "quickstart-huggingface" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 msgid "quickstart-jax" msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +msgid "quickstart-mlcube" +msgstr "" + #: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 msgid "" "The example has not yet been updated to work with the latest ``flwr`` " "version." msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 -msgid "quickstart-mlcube" -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 msgid "quickstart-mlx" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 msgid "" "`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "quickstart-monai" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 msgid "quickstart-pandas" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 msgid "quickstart-pytorch" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 msgid "quickstart-tabnet" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 msgid "quickstart-tensorflow" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 -msgid "Only runs on AMD64." -msgstr "" - #: ../../source/docker/set-environment-variables.rst:2 msgid "Set Environment Variables" msgstr "" @@ -2979,8 +3192,8 @@ msgid "" "You will learn how to run the Flower client and server components on two " "separate machines, with Flower configured to use TLS encryption and " "persist SuperLink state across restarts. A server consists of a SuperLink" -" and ``SuperExec``. For more details about the Flower architecture, refer" -" to the :doc:`../explanation-flower-architecture` explainer page." +" and a ``ServerApp``. For more details about the Flower architecture, " +"refer to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 @@ -3035,129 +3248,139 @@ msgstr "" msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "Get the IP address from the remote machine and save it for later." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:47 msgid "" "Use the ``certs.yml`` Compose file to generate your own self-signed " "certificates. If you have certificates, you can continue with Step 2." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:212 msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:54 msgid "" "For production environments, you may have to use dedicated services to " "obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:57 msgid "" -"First, set the environment variables ``SUPERLINK_IP`` and " -"``SUPEREXEC_IP`` with the IP address from the remote machine. For " -"example, if the IP is ``192.168.2.33``, execute:" +"First, set the environment variable ``SUPERLINK_IP`` with the IP address " +"from the remote machine. 
For example, if the IP is ``192.168.2.33``, " +"execute:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:64 msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:71 msgid "Step 2: Copy the Server Compose Files" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:73 msgid "" "Use the method that works best for you to copy the ``server`` directory, " -"the certificates, and your Flower project to the remote machine." +"the certificates, and the ``pyproject.toml`` file of your Flower project " +"to the remote machine." msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:86 msgid "Step 3: Start the Flower Server Components" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:88 msgid "" "Log into the remote machine using ``ssh`` and run the following command " -"to start the SuperLink and SuperExec services:" +"to start the SuperLink and ``ServerApp`` services:" msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``server`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the server ``compose.yml`` file." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:107 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions. After exporting the ``PROJECT_DIR`` (after line " +"4), run the following commands:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:116 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:165 +msgid "" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:118 msgid "Go back to your terminal on your local machine." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:121 msgid "Step 4: Start the Flower Client Components" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:123 msgid "" "On your local machine, run the following command to start the client " "components:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:133 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``client`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the client ``compose.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:137 msgid "Step 5: Run Your Flower Project" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"Specify the remote SuperExec IP addresses and the path to the root " -"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " -"the ``pyproject.toml`` file. Here, we have named our remote federation " -"``remote-superexec``:" +"Specify the remote SuperLink IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-deployment]`` table in" +" the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-deployment``:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:143 msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 -msgid "" -"The Path of the ``root-certificates`` should be relative to the location " -"of the ``pyproject.toml`` file." -msgstr "" - -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 -msgid "To run the project, execute:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:155 +msgid "Run the project and follow the ``ServerApp`` logs:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 msgid "" "That's it! With these steps, you've set up Flower on two separate " "machines and are ready to start using it." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:165 msgid "Step 6: Clean Up" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:167 msgid "Shut down the Flower client components:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:174 msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" @@ -3178,16 +3401,16 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:33 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:39 +#: ../../source/docker/tutorial-quickstart-docker.rst:38 msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:45 +#: ../../source/docker/tutorial-quickstart-docker.rst:44 msgid "" "User-defined networks, such as ``flwr-network``, enable IP resolution of " "container names, a feature absent in the default bridge network. This " @@ -3195,51 +3418,54 @@ msgid "" "first." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#: ../../source/docker/tutorial-quickstart-docker.rst:49 msgid "Step 2: Start the SuperLink" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 -#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:64 +#: ../../source/docker/tutorial-quickstart-docker.rst:51 msgid "Open your terminal and run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "Understand the command" +msgid "" +"``-p 9091:9091 -p 9092:9092 -p 9093:9093``: Map port ``9091``, ``9092`` " +"and ``9093`` of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " -"container to the same port of" +"container to the same port of the host machine, allowing other services " +"to access the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "the host machine, allowing other services to access the Driver API on" +msgid "" +"ServerAppIO API on ``http://localhost:9091``, the Fleet API on " +"``http://localhost:9092`` and" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgid "the Exec API on ``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:71 -#: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:219 -#: ../../source/docker/tutorial-quickstart-docker.rst:309 +#: ../../source/docker/tutorial-quickstart-docker.rst:74 +#: ../../source/docker/tutorial-quickstart-docker.rst:114 +#: ../../source/docker/tutorial-quickstart-docker.rst:223 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:72 +#: ../../source/docker/tutorial-quickstart-docker.rst:75 msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:73 -#: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:220 -#: ../../source/docker/tutorial-quickstart-docker.rst:311 +#: ../../source/docker/tutorial-quickstart-docker.rst:76 +#: ../../source/docker/tutorial-quickstart-docker.rst:116 +#: ../../source/docker/tutorial-quickstart-docker.rst:225 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3261,15 +3487,25 @@ msgstr "" msgid "unencrypted communication." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:80 -msgid "Step 3: Start the SuperNode" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"independent process. The SuperLink does not attempt to create it. You can" +" learn more about" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the different process modes here: :doc:`run-as-subprocess`." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:86 +msgid "Step 3: Start the SuperNodes" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:82 +#: ../../source/docker/tutorial-quickstart-docker.rst:88 msgid "Start two SuperNode containers." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:84 +#: ../../source/docker/tutorial-quickstart-docker.rst:90 msgid "Start the first container:" msgstr "" @@ -3285,18 +3521,18 @@ msgstr "" msgid "``http://localhost:9094``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:109 +#: ../../source/docker/tutorial-quickstart-docker.rst:115 msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " -"to be run and the specific tag" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: This is the " +"name of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "of the image." +msgid "image to be run and the specific tag of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst @@ -3321,49 +3557,52 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--supernode-address 0.0.0.0:9094``: Set the address and port number " -"that the SuperNode" +"``--clientappio-api-address 0.0.0.0:9094``: Set the address and port " +"number that the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "is listening on." +msgid "SuperNode is listening on to communicate with the ClientApp. If" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--isolation process``: Tells the SuperNode that the ClientApp is " -"created by separate" +"two SuperNodes are started on the same machine, set two different port " +"numbers for each SuperNode." 
msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "independent process. The SuperNode does not attempt to create it." +msgid "" +"(E.g. In the next step, we set the second SuperNode container to listen " +"on port 9095)" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#: ../../source/docker/tutorial-quickstart-docker.rst:132 msgid "Start the second container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:142 -msgid "Step 4: Start the ClientApp" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +msgid "Step 4: Start a ServerApp" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:144 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 msgid "" -"The ClientApp Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own ClientApp image. In order to " +"The ServerApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ServerApp image. In order to " "install the FAB dependencies, you will need to create a Dockerfile that " -"extends the ClientApp image and installs the required dependencies." +"extends the ServerApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:149 +#: ../../source/docker/tutorial-quickstart-docker.rst:157 msgid "" -"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " -"the following code into it:" +"Create a ServerApp Dockerfile called ``serverapp.Dockerfile`` and paste " +"the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:152 -msgid "Dockerfile.clientapp" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:160 +#, fuzzy +msgid "serverapp.Dockerfile" +msgstr "Construindo a imagem do servidor" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "Understand the Dockerfile" @@ -3371,13 +3610,13 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/serverapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"to be built from is the ``flwr/clientapp image``, version :substitution-" +"to be built from is the ``flwr/serverapp`` image, version :substitution-" "code:`|stable_flwr_version|`." msgstr "" @@ -3435,7 +3674,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"``ENTRYPOINT [\"flwr-serverapp\"]``: Set the command ``flwr-serverapp`` " "to be" msgstr "" @@ -3443,7 +3682,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:186 +#: ../../source/docker/tutorial-quickstart-docker.rst:194 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3452,204 +3691,197 @@ msgid "" "after it has been copied into the Docker image (see line 5)." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:192 +#: ../../source/docker/tutorial-quickstart-docker.rst:200 msgid "" -"Next, build the ClientApp Docker image by running the following command " -"in the directory where the Dockerfile is located:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst:201 -msgid "" -"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " -"Remember that these values are merely examples, and you can customize " -"them according to your requirements." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the ServerApp image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:205 -msgid "Start the first ClientApp container:" +#: ../../source/docker/tutorial-quickstart-docker.rst:224 +msgid "``--name serverapp``: Assign the name ``serverapp`` to the container." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +"``flwr_serverapp:0.0.1``: This is the name of the image to be run and the" " specific tag" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." +msgstr "" + #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" -" the address" +"``--serverappio-api-address superlink:9091``: Connect to the SuperLink's " +"ServerAppIO API" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``supernode-1:9094``." +msgid "at the address ``superlink:9091``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:226 -msgid "Start the second ClientApp container:" +#: ../../source/docker/tutorial-quickstart-docker.rst:234 +msgid "Step 5: Start the ClientApp" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 -msgid "Step 5: Start the SuperExec" +#: ../../source/docker/tutorial-quickstart-docker.rst:236 +msgid "" +"The procedure for building and running a ClientApp image is almost " +"identical to the ServerApp image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" -"The procedure for building and running a SuperExec image is almost " -"identical to the ClientApp image." +"Similar to the ServerApp image, you will need to create a Dockerfile that" +" extends the ClientApp image and installs the required FAB dependencies." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"Similar to the ClientApp image, you will need to create a Dockerfile that" -" extends the SuperExec image and installs the required FAB dependencies." 
+"Create a ClientApp Dockerfile called ``clientapp.Dockerfile`` and paste " +"the following code into it:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:245 -msgid "" -"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " -"the following code in:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst:248 -msgid "Dockerfile.superexec" +msgid "clientapp.Dockerfile" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"to be built from is the ``flwr/superexec image``, version :substitution-" +"to be built from is the ``flwr/clientapp`` image, version :substitution-" "code:`|stable_flwr_version|`." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" -"superexec`` to be" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +msgid "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:283 +#: ../../source/docker/tutorial-quickstart-docker.rst:286 msgid "" -"Afterward, in the directory that holds the Dockerfile, execute this " -"Docker command to build the SuperExec image:" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. 
" +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:290 -msgid "Start the SuperExec container:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgid "Start the first ClientApp container:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"the host machine, allowing you to access the SuperExec API on " -"``http://localhost:9093``." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst:310 -msgid "``--name superexec``: Assign the name ``superexec`` to the container." +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" -" specific tag" +"``--clientappio-api-address supernode-1:9094``: Connect to the " +"SuperNode's ClientAppIO" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " -"SuperExec executor to" +msgid "API at the address ``supernode-1:9094``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "connect to the SuperLink running on port ``9091``." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:314 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:320 +#: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#: ../../source/docker/tutorial-quickstart-docker.rst:328 msgid "Add the following lines to the ``pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:331 -msgid "Run the ``quickstart-docker`` project by executing the command:" -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker.rst:337 -msgid "Follow the SuperExec logs to track the execution of the run:" +msgid "" +"Run the ``quickstart-docker`` project and follow the ServerApp logs to " +"track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:345 msgid "Step 7: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:346 +#: ../../source/docker/tutorial-quickstart-docker.rst:347 msgid "" "Change the application code. For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#: ../../source/docker/tutorial-quickstart-docker.rst:350 msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:356 -msgid "Stop the current ClientApp containers:" +#: ../../source/docker/tutorial-quickstart-docker.rst:357 +msgid "Stop the current ServerApp and ClientApp containers:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:361 +msgid "" +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:364 +msgid "If you haven’t made any changes, you can skip steps 2 through 4." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#: ../../source/docker/tutorial-quickstart-docker.rst:370 #, fuzzy -msgid "Rebuild the FAB and ClientApp image:" +msgid "Rebuild ServerApp and ClientApp images:" msgstr "Construindo a imagem base" -#: ../../source/docker/tutorial-quickstart-docker.rst:368 -msgid "Launch two new ClientApp containers based on the newly built image:" +#: ../../source/docker/tutorial-quickstart-docker.rst:377 +msgid "" +"Launch one new ServerApp and two new ClientApp containers based on the " +"newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:383 +#: ../../source/docker/tutorial-quickstart-docker.rst:402 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:390 +#: ../../source/docker/tutorial-quickstart-docker.rst:409 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:392 +#: ../../source/docker/tutorial-quickstart-docker.rst:411 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 -#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker.rst:423 msgid "Where to Go Next" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:406 +#: ../../source/docker/tutorial-quickstart-docker.rst:425 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:407 +#: ../../source/docker/tutorial-quickstart-docker.rst:426 msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:427 msgid ":doc:`tutorial-quickstart-docker-compose`" 
msgstr "" @@ -3675,173 +3907,158 @@ msgstr "" msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:39 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:46 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " -"SuperExec and SuperNode images correctly." +"``ServerApp`` and ``ClientApp`` images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "Step 2: Run Flower in Insecure Mode" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:53 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:61 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:175 msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 -msgid "" -"``-f compose.yml``: Specify the YAML file that contains the basic Flower " -"service definitions." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:181 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:182 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:77 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 msgid "" -"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" -" the SuperExec addresses in the ``pyproject.toml`` file." +"To ensure the ``flwr`` CLI connects to the SuperLink, you need to specify" +" the SuperLink addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:85 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:225 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 -msgid "Execute the command to run the quickstart example:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 -msgid "Monitor the SuperExec logs and wait for the summary to appear:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:94 +msgid "" +"Run the quickstart example, monitor the ``ServerApp`` logs and wait for " +"the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:102 msgid "Step 4: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 msgid "In the next step, change the application code." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:121 msgid "Rebuild and restart the services." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 -msgid "" -"If you have modified the dependencies listed in your ``pyproject.toml`` " -"file, it is essential to rebuild images." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:128 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:130 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:136 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 -msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the ``ServerApp`` logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:155 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:157 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:162 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 -msgid "" -"For more information, consult the following page: :doc:`persist-" -"superlink-state`." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:167 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +msgid "Run the command:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 -msgid "Run the command:" +msgid "" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst @@ -3861,17 +4078,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:238 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:197 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -3879,119 +4096,102 @@ msgid "" "if the containers are stopped and started again." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:205 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:207 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 -msgid "Step 7: Add another SuperNode" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 -msgid "" -"You can add more SuperNodes and ClientApps by duplicating their " -"definitions in the ``compose.yml`` file." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:245 +msgid "Step 7: Add another SuperNode and ClientApp" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 msgid "" -"Just give each new SuperNode and ClientApp service a unique service name " -"like ``supernode-3``, ``clientapp-3``, etc." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 -msgid "In ``compose.yml``, add the following:" +"You can add more SuperNodes and ClientApps by uncommenting their " +"definitions in the ``compose.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:250 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 -msgid "" -"If you also want to enable TLS for the new SuperNodes, duplicate the " -"SuperNode definition for each new SuperNode service in the ``with-" -"tls.yml`` file." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:302 msgid "" -"Make sure that the names of the services match with the one in the " -"``compose.yml`` file." +"If you also want to enable TLS for the new SuperNode, uncomment the " +"definition in the ``with-tls.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 -msgid "In ``with-tls.yml``, add the following:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:305 +msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 -msgid "with-tls.yml" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:326 +msgid "Restart the services with:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:335 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:350 -msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:340 +msgid "Comment out the lines 2-6 and uncomment the lines 7-13:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:342 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 msgid "Restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:376 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:378 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 msgid "Remove all services and volumes:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:402 msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "" @@ -4014,446 +4214,90 @@ msgid "" "tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/explanation-differential-privacy.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." 
-msgstr "" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "Centralized Training" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called ``cifar.py``, revised part is " -"shown below:" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 -msgid "You can now run your machine learning workload:" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/explanation-differential-privacy.rst:27 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." -msgstr "" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 -msgid "Federated Training" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only ``get_parameters`` and ``set_parameters`` function " -"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" -" PyTorch - From Centralized To Federated `. first." +#: ../../source/explanation-differential-privacy.rst:32 +msgid "Formal Definition" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -"``server.py`` keeps unchanged, we can start the server directly." 
+"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#: ../../source/explanation-differential-privacy.rst:42 msgid "" -"Finally, we will revise our *client* logic by changing ``get_parameters``" -" and ``set_parameters`` in ``client.py``, we will exclude batch " -"normalization parameters from model parameter list when sending to or " -"receiving from the server." -msgstr "" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 -msgid "Now, you can now open two additional terminal windows and run" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 +#: ../../source/explanation-differential-privacy.rst:47 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. 
The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 -#: ../../source/tutorial-quickstart-jax.rst:319 -msgid "Next Steps" +#: ../../source/explanation-differential-privacy.rst:56 +msgid "Differential Privacy in Machine Learning" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 -msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." 
-msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 -msgid "" -"Let's create a new file called ``cifar.py`` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as ``torch`` and ``torchvision``) need to be " -"imported. You can see that we do not import any package for federated " -"learning. You can keep all these imports as they are even when we add the" -" federated learning components at a later point." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 -msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in ``class Net()``." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 -msgid "" -"The ``load_data()`` function loads the CIFAR-10 training and test sets. " -"The ``transform`` normalized the data after loading." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 -msgid "" -"We now need to define the training (function ``train()``) which loops " -"over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 -msgid "" -"The evaluation of the model is defined in the function ``test()``. The " -"function loops over all test samples and measures the loss of the model " -"based on the test dataset." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." 
-msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 -msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in ``cifar.py`` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 -#: ../../source/tutorial-quickstart-jax.rst:147 -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -"``server.py`` first. 
The *server* needs to import the Flower package " -"``flwr``. Next, we use the ``start_server`` function to start a server " -"and tell it to perform three rounds of federated learning." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 -#: ../../source/tutorial-quickstart-jax.rst:161 -msgid "We can already start the *server*:" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined centralized training in ``cifar.py``. Our " -"*client* needs to import ``flwr``, but also ``torch`` to update the " -"parameters on our PyTorch model:" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " -"than ``Client`` if you use a framework with good NumPy interoperability " -"(like PyTorch or TensorFlow/Keras) because it avoids some of the " -"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " -"implement four methods, two methods for getting/setting model parameters," -" one method for training the model, and one method for testing the model:" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#, fuzzy -msgid "``set_parameters``" -msgstr "``SETUPTOOLS_VERSION``" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 -#: ../../source/tutorial-quickstart-jax.rst:192 -msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#: ../../source/tutorial-quickstart-jax.rst:194 -msgid "" -"loop over the list of model parameters received as NumPy ``ndarray``'s " -"(think list of neural network layers)" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#: ../../source/tutorial-quickstart-scikitlearn.rst:129 -msgid "``get_parameters``" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -msgid "" -"get the model parameters and return them as a list of NumPy ``ndarray``'s" -" (which is what ``flwr.client.NumPyClient`` expects)" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "``fit``" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 -#: ../../source/tutorial-quickstart-jax.rst:200 -#: ../../source/tutorial-quickstart-jax.rst:205 -msgid "" -"update the parameters of the local model with the parameters received " -"from the server" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: 
../../source/tutorial-quickstart-jax.rst:202 -msgid "train the model on the local training set" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 -msgid "get the updated local model weights and return them to the server" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -#: ../../source/tutorial-quickstart-jax.rst:208 -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -msgid "``evaluate``" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 -#: ../../source/tutorial-quickstart-jax.rst:207 -msgid "evaluate the updated model on the local test set" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -msgid "return the local loss and accuracy to the server" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 -msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " -"So what we really do here is we tell Flower through our ``NumPyClient`` " -"subclass which of our already defined functions to call for training and " -"evaluation. We included type annotations to give you a better " -"understanding of the data types that get passed around." -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a ``CifarClient``, and starts this client. You load your " -"data and model by using ``cifar.py``. Start ``CifarClient`` with the " -"function ``fl.client.start_client()`` by pointing it at the same IP " -"address we used in ``server.py``:" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 -#: ../../source/tutorial-quickstart-jax.rst:309 -msgid "And that's it. 
You can now open two additional terminal windows and run" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 -msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:14 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:4 -msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:9 -msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:16 -msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). 
Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:27 -msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:32 -msgid "Formal Definition" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:34 -msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:42 -msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:47 -msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. 
The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:56 -msgid "Differential Privacy in Machine Learning" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" "DP can be utilized in machine learning to preserve the privacy of the " "training data. Differentially private machine learning algorithms are " @@ -4588,7 +4432,7 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:141 -#: ../../source/how-to-use-differential-privacy.rst:113 +#: ../../source/how-to-use-differential-privacy.rst:114 msgid "Local Differential Privacy" msgstr "" @@ -4657,7 +4501,6 @@ msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" #: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated evaluation" msgstr "" @@ -4684,11 +4527,11 @@ msgid "" "return evaluation results:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:61 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "Custom Strategies" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:63 +#: ../../source/explanation-federated-evaluation.rst:72 msgid "" "The ``Strategy`` abstraction provides a method called ``evaluate`` that " "can directly be used to evaluate the current global model parameters. The" @@ -4696,31 +4539,32 @@ msgid "" "aggregation and before federated evaluation (see next paragraph)." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:69 +#: ../../source/explanation-federated-evaluation.rst:78 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated Evaluation" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:72 +#: ../../source/explanation-federated-evaluation.rst:81 msgid "Implementing Federated Evaluation" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:74 +#: ../../source/explanation-federated-evaluation.rst:83 msgid "" "Client-side evaluation happens in the ``Client.evaluate`` method and can " "be configured from the server side." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/explanation-federated-evaluation.rst:116 msgid "Configuring Federated Evaluation" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:110 +#: ../../source/explanation-federated-evaluation.rst:118 msgid "" "Federated evaluation can be configured from the server side. Built-in " "strategies support the following arguments:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:113 +#: ../../source/explanation-federated-evaluation.rst:121 msgid "" "``fraction_evaluate``: a ``float`` defining the fraction of clients that " "will be selected for evaluation. If ``fraction_evaluate`` is set to " @@ -4729,7 +4573,7 @@ msgid "" "``0.0``, federated evaluation will be disabled." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:118 +#: ../../source/explanation-federated-evaluation.rst:126 msgid "" "``min_evaluate_clients``: an ``int``: the minimum number of clients to be" " selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " @@ -4737,7 +4581,7 @@ msgid "" "to the server, then ``20`` clients will be selected for evaluation." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:122 +#: ../../source/explanation-federated-evaluation.rst:130 msgid "" "``min_available_clients``: an ``int`` that defines the minimum number of " "clients which need to be connected to the server before a round of " @@ -4746,7 +4590,7 @@ msgid "" "connected before it continues to sample clients for evaluation." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:127 +#: ../../source/explanation-federated-evaluation.rst:135 msgid "" "``on_evaluate_config_fn``: a function that returns a configuration " "dictionary which will be sent to the selected clients. The function will " @@ -4755,26 +4599,27 @@ msgid "" "the number of validation steps performed." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:157 +#: ../../source/explanation-federated-evaluation.rst:177 msgid "Evaluating Local Model Updates During Training" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:159 +#: ../../source/explanation-federated-evaluation.rst:179 msgid "" "Model parameters can also be evaluated during training. ``Client.fit`` " "can return arbitrary evaluation results as a dictionary:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:201 +#: ../../source/explanation-federated-evaluation.rst:220 msgid "Full Code Example" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:203 +#: ../../source/explanation-federated-evaluation.rst:222 msgid "" "For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"evaluation, see the `Advanced TensorFlow Example " +"`_" +" (the same approach can be applied to workloads implemented in any other " +"framework)." 
msgstr "" #: ../../source/explanation-flower-architecture.rst:-1 @@ -4976,1005 +4821,832 @@ msgid "" "a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/explanation-flower-architecture.rst:121 -msgid "" -"To help you start and manage all of the concurrently executing training " -"runs, Flower offers one additional long-running server-side service " -"called **SuperExec**. When you type ``flwr run`` to start a new training " -"run, the ``flwr`` CLI bundles your local project (mainly your " -"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " -"**SuperExec** will then take care of starting and managing your " -"``ServerApp``, which in turn selects SuperNodes to execute your " -"``ClientApp``." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:128 -msgid "" -"This architecture allows many users to (concurrently) run their projects " -"on the same federation, simply by typing ``flwr run`` on their local " -"developer machine." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "Flower Deployment Engine with SuperExec" -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "The SuperExec service for managing concurrent training runs in Flower." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:141 +#: ../../source/explanation-flower-architecture.rst:123 msgid "" "This explanation covers the Flower Deployment Engine. An explanation " "covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/explanation-flower-architecture.rst:146 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" "As we continue to enhance Flower at a rapid pace, we'll periodically " "update this explainer document. Feel free to share any feedback with us." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "" - -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "" - -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" +#: ../../source/how-to-aggregate-evaluation-results.rst:4 +msgid "" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +msgid "" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. 
Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" +#: ../../source/how-to-aggregate-evaluation-results.rst:38 +msgid "" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" +#: ../../source/how-to-authenticate-supernodes.rst:4 +msgid "" +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. 
" +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" +#: ../../source/how-to-authenticate-supernodes.rst:9 +msgid "" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" +#: ../../source/how-to-authenticate-supernodes.rst:10 +msgid "" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" +#: ../../source/how-to-authenticate-supernodes.rst:14 +msgid "" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" +#: ../../source/how-to-authenticate-supernodes.rst:20 +msgid "" +"This guide covers a preview feature that might change in future versions " +"of Flower." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" +#: ../../source/how-to-authenticate-supernodes.rst:24 +msgid "" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" +#: ../../source/how-to-authenticate-supernodes.rst:28 +msgid "Enable node authentication in ``SuperLink``" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" +#: ../../source/how-to-authenticate-supernodes.rst:30 +msgid "" +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." 
+" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" +#: ../../source/how-to-authenticate-supernodes.rst:49 +msgid "" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" +#: ../../source/how-to-authenticate-supernodes.rst:53 +msgid "" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" +#: ../../source/how-to-authenticate-supernodes.rst:57 +msgid "" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" +#: ../../source/how-to-authenticate-supernodes.rst:64 +msgid "" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#: ../../source/how-to-authenticate-supernodes.rst:73 +msgid "" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" +#: ../../source/how-to-authenticate-supernodes.rst:85 +msgid "" +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" +#: ../../source/how-to-authenticate-supernodes.rst:93 +msgid "" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-tls-connections.rst:108 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/how-to-authenticate-supernodes.rst:102 +msgid "" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure Clients" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" +#: ../../source/how-to-configure-clients.rst:4 +msgid "" +"Flower provides the ability to send configuration values to clients, " +"allowing server-side control over client behavior. This feature enables " +"flexible and dynamic adjustment of client-side hyperparameters, improving" +" collaboration and experimentation." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/how-to-configure-clients.rst:11 +msgid "" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These values must be of type ``Scalar``, which includes " +"``bool``, ``bytes``, ``float``, ``int``, or ``str`` (or equivalent types " +"in different languages). Scalar is the value type directly supported by " +"Flower for these configurations." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" +#: ../../source/how-to-configure-clients.rst:17 +msgid "For example, a ``FitConfig`` dictionary might look like this:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/how-to-configure-clients.rst:28 msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +"Flower serializes these configuration dictionaries (or *config dicts* for" +" short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" +#: ../../source/how-to-configure-clients.rst:34 +msgid "" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. To send collections, convert them to a supported type " +"(e.g., JSON string) and decode on the client side." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" +#: ../../source/how-to-configure-clients.rst:38 +#, fuzzy +msgid "Example:" +msgstr "Exemplo" + +#: ../../source/how-to-configure-clients.rst:51 +msgid "Configuration through Built-in Strategies" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" +#: ../../source/how-to-configure-clients.rst:53 +msgid "" +"Flower provides configuration options to control client behavior " +"dynamically through ``FitConfig`` and ``EvaluateConfig``. 
These " +"configurations allow server-side control over client-side parameters such" +" as batch size, number of local epochs, learning rate, and evaluation " +"settings, improving collaboration and experimentation." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" +#: ../../source/how-to-configure-clients.rst:59 +msgid "``FitConfig`` and ``EvaluateConfig``" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/how-to-configure-clients.rst:61 msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These dictionaries enable the server to adjust client-" +"side hyperparameters and monitor progress effectively." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 -msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +#: ../../source/how-to-configure-clients.rst:67 +msgid "``FitConfig``" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/how-to-configure-clients.rst:69 msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"``FitConfig`` specifies the hyperparameters for training rounds, such as " +"the batch size, number of local epochs, and other parameters that " +"influence training." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +#: ../../source/how-to-configure-clients.rst:72 +msgid "For example, a ``fit_config`` callback might look like this:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +"You can then pass this ``fit_config`` callback to a built-in strategy " +"such as ``FedAvg``:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/how-to-configure-clients.rst:101 msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"On the client side, the configuration is received in the ``fit`` method, " +"where it can be read and used:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:124 +msgid "``EvaluateConfig``" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/how-to-configure-clients.rst:126 msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"``EvaluateConfig`` specifies hyperparameters for the evaluation process, " +"such as the batch size, evaluation frequency, or metrics to compute " +"during evaluation." 
+msgstr "" + +#: ../../source/how-to-configure-clients.rst:129 +msgid "For example, an ``evaluate_config`` callback might look like this:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/how-to-configure-clients.rst:143 msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +"You can pass this ``evaluate_config`` callback to a built-in strategy " +"like ``FedAvg``:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/how-to-configure-clients.rst:151 msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." +"On the client side, the configuration is received in the ``evaluate`` " +"method, where it can be used during the evaluation process:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" +#: ../../source/how-to-configure-clients.rst:175 +msgid "Example: Sending Training Configurations" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/how-to-configure-clients.rst:177 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"Imagine we want to send (a) the batch size, (b) the current global round," +" and (c) the number of local epochs. 
Our configuration function could " +"look like this:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/how-to-configure-clients.rst:190 +msgid "" +"To use this function with a built-in strategy like ``FedAvg``, pass it to" +" the ``FedAvg`` constructor (typically in your ``server_fn``):" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" +#: ../../source/how-to-configure-clients.rst:211 +msgid "Client-Side Configuration" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" +#: ../../source/how-to-configure-clients.rst:213 +msgid "" +"On the client side, configurations are received as input to the ``fit`` " +"and ``evaluate`` methods. For example:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" +#: ../../source/how-to-configure-clients.rst:230 +msgid "Dynamic Configurations per Round" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" +#: ../../source/how-to-configure-clients.rst:232 +msgid "" +"Configuration functions are called at the beginning of every round. This " +"allows for dynamic adjustments based on progress. For example, you can " +"increase the number of local epochs in later rounds:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" +#: ../../source/how-to-configure-clients.rst:247 +msgid "Customizing Client Configurations" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" +#: ../../source/how-to-configure-clients.rst:249 +msgid "" +"In some cases, it may be necessary to send different configurations to " +"individual clients. 
To achieve this, you can create a custom strategy by " +"extending a built-in one, such as ``FedAvg``:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." +#: ../../source/how-to-configure-clients.rst:254 +msgid "Example: Client-Specific Configuration" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" +#: ../../source/how-to-configure-clients.rst:273 +msgid "Next, use this custom strategy as usual:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +#: ../../source/how-to-configure-clients.rst:287 +msgid "Summary of Enhancements" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." +#: ../../source/how-to-configure-clients.rst:289 +msgid "**Dynamic Configurations**: Enables per-round adjustments via functions." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 -msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +#: ../../source/how-to-configure-clients.rst:290 +msgid "**Advanced Customization**: Supports client-specific strategies." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/how-to-configure-clients.rst:291 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." +"**Client-Side Integration**: Configurations accessible in ``fit`` and " +"``evaluate``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +#: ../../source/how-to-design-stateful-clients.rst:2 +msgid "Design stateful ClientApps" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +#: ../../source/how-to-design-stateful-clients.rst:20 msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +"By design, ClientApp_ objects are stateless. This means that the " +"``ClientApp`` object is recreated each time a new ``Message`` is to be " +"processed. This behaviour is identical with Flower's Simulation Engine " +"and Deployment Engine. For the former, it allows us to simulate the " +"running of a large number of nodes on a single machine or across multiple" +" machines. For the latter, it enables each ``SuperNode`` to be part of " +"multiple runs, each running a different ``ClientApp``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/how-to-design-stateful-clients.rst:27 msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +"When a ``ClientApp`` is executed it receives a Context_. This context is " +"unique for each ``ClientApp``, meaning that subsequent executions of the " +"same ``ClientApp`` from the same node will receive the same ``Context`` " +"object. In the ``Context``, the ``.state`` attribute can be used to store" +" information that you would like the ``ClientApp`` to have access to for " +"the duration of the run. This could be anything from intermediate results" +" such as the history of training losses (e.g. as a list of `float` values" +" with a new entry appended each time the ``ClientApp`` is executed), " +"certain parts of the model that should persist at the client side, or " +"some other arbitrary Python objects. 
These items would need to be " +"serialized before saving them into the context." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." +#: ../../source/how-to-design-stateful-clients.rst:38 +msgid "Saving metrics to the context" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/how-to-design-stateful-clients.rst:40 +msgid "" +"This section will demonstrate how to save metrics such as accuracy/loss " +"values to the Context_ so they can be used in subsequent executions of " +"the ``ClientApp``. If your ``ClientApp`` makes use of NumPyClient_ then " +"entire object is also re-created for each call to methods like ``fit()`` " +"or ``evaluate()``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" +#: ../../source/how-to-design-stateful-clients.rst:45 +msgid "" +"Let's begin with a simple setting in which ``ClientApp`` is defined as " +"follows. The ``evaluate()`` method only generates a random number and " +"prints it." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/how-to-design-stateful-clients.rst:50 msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +"You can create a PyTorch project with ready-to-use ``ClientApp`` and " +"other components by running ``flwr new``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +#: ../../source/how-to-design-stateful-clients.rst:81 msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"Let's say we want to save that randomly generated integer and append it " +"to a list that persists in the context. To do that, you'll need to do two" +" key things:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +#: ../../source/how-to-design-stateful-clients.rst:84 +msgid "Make the ``context.state`` reachable withing your client class" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +#: ../../source/how-to-design-stateful-clients.rst:85 msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +"Initialise the appropiate record type (in this example we use " +"ConfigsRecord_) and save/read your entry when required." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/how-to-design-stateful-clients.rst:123 msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +"If you run the app, you'll see an output similar to the one below. 
See " +"how after each round the `n_val` entry in the context gets one additional" +" integer ? Note that the order in which the `ClientApp` logs these " +"messages might differ slightly between rounds." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." +#: ../../source/how-to-design-stateful-clients.rst:146 +msgid "Saving model parameters to the context" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/how-to-design-stateful-clients.rst:148 msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +"Using ConfigsRecord_ or MetricsRecord_ to save \"simple\" components is " +"fine (e.g., float, integer, boolean, string, bytes, and lists of these " +"types. Note that MetricsRecord_ only supports float, integer, and lists " +"of these types) Flower has a specific type of record, a " +"ParametersRecord_, for storing model parameters or more generally data " +"arrays." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/how-to-design-stateful-clients.rst:153 +msgid "" +"Let's see a couple of examples of how to save NumPy arrays first and then" +" how to save parameters of PyTorch and TensorFlow models." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/how-to-design-stateful-clients.rst:158 msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +"The examples below omit the definition of a ``ClientApp`` to keep the " +"code blocks concise. To make use of ``ParametersRecord`` objects in your " +"``ClientApp`` you can follow the same principles as outlined earlier." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." +#: ../../source/how-to-design-stateful-clients.rst:163 +msgid "Saving NumPy arrays to the context" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#: ../../source/how-to-design-stateful-clients.rst:165 +msgid "" +"Elements stored in a `ParametersRecord` are of type Array_, which is a " +"data structure that holds ``bytes`` and metadata that can be used for " +"deserialization. Let's see how to create an ``Array`` from a NumPy array " +"and insert it into a ``ParametersRecord``. Here we will make use of the " +"built-in serialization and deserialization mechanisms in Flower, namely " +"the ``flwr.common.array_from_numpy`` function and the `numpy()` method of" +" an Array_ object." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/how-to-design-stateful-clients.rst:174 msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." +"Array_ objects carry bytes as their main payload and additional metadata " +"to use for deserialization. You can implement your own " +"serialization/deserialization if the provided ``array_from_numpy`` " +"doesn't fit your usecase." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +#: ../../source/how-to-design-stateful-clients.rst:178 msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." +"Let's see how to use those functions to store a NumPy array into the " +"context." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" +#: ../../source/how-to-design-stateful-clients.rst:206 +msgid "" +"To extract the data in a ``ParametersRecord``, you just need to " +"deserialize the array if interest. For example, following the example " +"above:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +#: ../../source/how-to-design-stateful-clients.rst:223 +msgid "Saving PyTorch parameters to the context" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" +#: ../../source/how-to-design-stateful-clients.rst:225 +msgid "" +"Following the NumPy example above, to save parameters of a PyTorch model " +"a straightforward way of doing so is to transform the parameters into " +"their NumPy representation and then proceed as shown earlier. Below is a " +"simple self-contained example for how to do this." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/how-to-design-stateful-clients.rst:263 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." 
+"Let say now you want to apply the parameters stored in your context to a " +"new instance of the model (as it happens each time a ``ClientApp`` is " +"executed). You will need to:" msgstr "" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" +#: ../../source/how-to-design-stateful-clients.rst:266 +msgid "Deserialize each element in your specific ``ParametersRecord``" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" +#: ../../source/how-to-design-stateful-clients.rst:267 +msgid "Construct a ``state_dict`` and load it" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/how-to-design-stateful-clients.rst:287 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +"And that's it! Recall that even though this example shows how to store " +"the entire ``state_dict`` in a ``ParametersRecord``, you can just save " +"part of it. The process would be identical, but you might need to adjust " +"how it is loaded into an existing model using PyTorch APIs." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" +#: ../../source/how-to-design-stateful-clients.rst:293 +msgid "Saving Tensorflow/Keras parameters to the context" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/how-to-design-stateful-clients.rst:295 msgid "" -"The same ``Strategy``-customization approach can be used to aggregate " -"custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" +"Follow the same steps as done above but replace the ``state_dict`` logic " +"with simply `get_weights() " +"`_" +" to convert the model parameters to a list of NumPy arrays that can then " +"be serialized into an ``Array``. 
Then, after deserialization, use " +"`set_weights() " +"`_" +" to apply the new parameters to a model." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:2 +msgid "Enable TLS connections" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:39 +#: ../../source/how-to-enable-tls-connections.rst:4 msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"This guide describes how to a TLS-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/how-to-enable-tls-connections.rst:8 +msgid "" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 +#: ../../source/how-to-enable-tls-connections.rst:11 msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already TLS-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/how-to-enable-tls-connections.rst:16 +msgid "Certificates" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/how-to-enable-tls-connections.rst:18 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"Using TLS-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 +#: ../../source/how-to-enable-tls-connections.rst:29 msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -msgid "SuperLink verifies the token" +#: ../../source/how-to-enable-tls-connections.rst:32 +msgid "" +"The approach for generating TLS certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:14 -msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +#: ../../source/how-to-enable-tls-connections.rst:40 +msgid "Server (SuperLink)" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:20 +#: ../../source/how-to-enable-tls-connections.rst:42 msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +"Navigate to the ``examples/advanced-tensorflow`` folder (`here " +"`_) and use the following terminal command to start a server " +"(SuperLink) that uses the previously generated certificates:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:24 +#: ../../source/how-to-enable-tls-connections.rst:54 msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:28 -msgid "Enable node authentication in ``SuperLink``" +#: ../../source/how-to-enable-tls-connections.rst:58 +msgid "Clients (SuperNode)" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:30 +#: ../../source/how-to-enable-tls-connections.rst:60 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower ``SuperLink``." 
-" Use the following terminal command to start a Flower ``SuperNode`` that " -"has both secure connections and node authentication enabled:" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:47 -msgid "Let's break down the authentication flags:" +#: ../../source/how-to-enable-tls-connections.rst:71 +msgid "" +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:49 +#: ../../source/how-to-enable-tls-connections.rst:74 msgid "" -"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " -"storing all known node public keys. You need to store all known node " -"public keys that are allowed to participate in a federation in one CSV " -"file (``.csv``)." +"In another terminal, start a second SuperNode that uses the same " +"certificates:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 +#: ../../source/how-to-enable-tls-connections.rst:84 msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +"Note that in the second SuperNode, if you run both on the same machine, " +"you must specify a different port for the ``ClientAppIO`` API address to " +"avoid clashing with the first SuperNode." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:57 -msgid "" -"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" -"superlink-public-key`` expect paths to the server's private and public " -"keys. For development purposes, you can generate a private and public key" -" pair using ``ssh-keygen -t ecdsa -b 384``." 
+#: ../../source/how-to-enable-tls-connections.rst:89 +msgid "Executing ``flwr run`` with TLS" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:64 +#: ../../source/how-to-enable-tls-connections.rst:91 msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +"The root certificates used for executing ``flwr run`` is specified in the" +" ``pyproject.toml`` of your app." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:71 -msgid "Enable node authentication in ``SuperNode``" +#: ../../source/how-to-enable-tls-connections.rst:100 +msgid "" +"Note that the path to the ``root-certificates`` is relative to the root " +"of the project. Now, you can run the example by executing the following:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:73 +#: ../../source/how-to-enable-tls-connections.rst:110 msgid "" -"Similar to the long-running Flower server (``SuperLink``), you can easily" -" enable node authentication in the long-running Flower client " -"(``SuperNode``). Use the following terminal command to start an " -"authenticated ``SuperNode``:" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an TLS-enabled server and have two clients " +"establish secure connections to it. You should also have learned how to " +"run your Flower project using ``flwr run`` with TLS enabled." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:85 +#: ../../source/how-to-enable-tls-connections.rst:117 msgid "" -"The ``--auth-supernode-private-key`` flag expects a path to the node's " -"private key file and the ``--auth-supernode-public-key`` flag expects a " -"path to the node's public key file. For development purposes, you can " -"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " -"384``." +"For running a Docker setup with TLS enabled, please refer to :doc:`docker" +"/enable-tls`." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:91 -msgid "Security notice" +#: ../../source/how-to-enable-tls-connections.rst:121 +msgid "Additional resources" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:93 +#: ../../source/how-to-enable-tls-connections.rst:123 msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:100 -#: ../../source/how-to-enable-ssl-connections.rst:71 -#: ../../source/how-to-use-built-in-mods.rst:95 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" +#: ../../source/how-to-enable-tls-connections.rst:126 +msgid "`Let's Encrypt `_" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:102 -msgid "" -"You should now have learned how to start a long-running Flower server " -"(``SuperLink``) and client (``SuperNode``) with node authentication " -"enabled. 
You should also know the significance of the private key and " -"store it safely to minimize security risks." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:4 -msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:9 -msgid "Configuration values" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:11 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:25 -msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:31 -msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:36 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." 
-msgstr "" - -#: ../../source/how-to-configure-clients.rst:41 -msgid "Configuration through built-in strategies" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:43 -msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like ``FedAvg``. Built-in strategies support so-called " -"configuration functions. A configuration function is a function that the " -"built-in strategy calls to get the configuration dictionary for the " -"current round. It then forwards the configuration dictionary to all the " -"clients selected during that round." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:49 -msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:65 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -"``on_fit_config_fn``:" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:75 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:86 -msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:90 -msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. 
If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:107 -msgid "The ``FedAvg`` strategy will call this function *every round*." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:110 -msgid "Configuring individual clients" -msgstr "" - -#: ../../source/how-to-configure-clients.rst:112 -msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:115 -msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " -"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " -"the config dict of a *single client* (only the first client in the list, " -"the other clients in this round to not receive this \"special\" config " -"value):" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:13 -msgid "" -"containing relevant information including: log message level (e.g. " -"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " -"from, as well as the log message itself. 
In this way, the logger would " -"typically display information on your terminal as follows:" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:35 -msgid "Saving log to file" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:37 -msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do ``fl.server.start_server``) and when using " -"the ``VirtualClientEngine`` (i.e. when you do " -"``fl.simulation.start_simulation``). In some situations you might want to" -" save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:59 -msgid "" -"With the above, Flower will record the log you see on your terminal to " -"``log.txt``. This file will be created in the same directory as were you " -"are running the code from. If we inspect we see the log above is also " -"recorded but prefixing with ``identifier`` each line:" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:81 -msgid "Log your own messages" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:83 -msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "" - -#: ../../source/how-to-configure-logging.rst:114 -msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "" - -#: ../../source/how-to-configure-logging.rst:140 -msgid "Log to a remote service" -msgstr "" - -#: ../../source/how-to-configure-logging.rst:142 -msgid "" -"The ``fl.common.logger.configure`` function, also allows specifying a " -"host to which logs can be pushed (via ``POST``) through a native Python " -"``logging.handler.HTTPHandler``. 
This is a particularly useful feature in" -" ``gRPC``-based Federated Learning workloads where otherwise gathering " -"logs from all entities (i.e. the server and the clients) might be " -"cumbersome. Note that in Flower simulation, the server automatically " -"displays all logs. You can still specify a ``HTTPHandler`` should you " -"wish to backup or analyze the logs somewhere else." -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:4 -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " -"can establish a secure connections to it." +#: ../../source/how-to-enable-tls-connections.rst:127 +msgid "`certbot `_" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:8 -msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +#: ../../source/how-to-implement-fedbn.rst:2 +msgid "Implement FedBN" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:11 +#: ../../source/how-to-implement-fedbn.rst:4 msgid "" -"The code example comes with a ``README.md`` file which explains how to " -"start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training method " +"designed for non-IID data. We are using PyTorch to train a Convolutional " +"Neural Network (with Batch Normalization layers) on the CIFAR-10 dataset." +" When applying FedBN, only minor changes are needed compared to " +":doc:`Quickstart PyTorch `." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" +#: ../../source/how-to-implement-fedbn.rst:12 +msgid "Model" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/how-to-implement-fedbn.rst:14 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in ``examples/advanced-" -"tensorflow/certificates/generate.sh`` with the following command " -"sequence:" +"A full introduction to federated learning with PyTorch and Flower can be " +"found in :doc:`Quickstart PyTorch `. This " +"how-to guide varies only a few details in ``task.py``. FedBN requires a " +"model architecture (defined in class ``Net()``) that uses Batch " +"Normalization layers:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/how-to-implement-fedbn.rst:45 msgid "" -"This will generate the certificates in ``examples/advanced-" -"tensorflow/.cache/certificates``." +"Try editing the model architecture, then run the project to ensure " +"everything still works:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:32 +#: ../../source/how-to-implement-fedbn.rst:52 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." 
-msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:40 -msgid "Server (SuperLink)" +"So far this should all look fairly familiar if you've used Flower with " +"PyTorch before." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:42 -msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +#: ../../source/how-to-implement-fedbn.rst:55 +msgid "FedBN" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:52 +#: ../../source/how-to-implement-fedbn.rst:57 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:56 -msgid "Client (SuperNode)" +"To adopt FedBN, only the ``get_parameters`` and ``set_parameters`` " +"functions in ``task.py`` need to be revised. FedBN only changes the " +"client-side by excluding batch normalization parameters from being " +"exchanged with the server." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:58 +#: ../../source/how-to-implement-fedbn.rst:61 msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +"We revise the *client* logic by changing ``get_parameters`` and " +"``set_parameters`` in ``task.py``. The batch normalization parameters are" +" excluded from model parameter list when sending to or receiving from the" +" server:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:67 -msgid "" -"When setting ``root_certificates``, the client expects a file path to " -"PEM-encoded root certificates." 
+#: ../../source/how-to-implement-fedbn.rst:90 +msgid "To test the new appraoch, run the project again:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:73 +#: ../../source/how-to-implement-fedbn.rst:96 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Your PyTorch project now runs federated learning with FedBN. " +"Congratulations!" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:78 -msgid "Additional resources" +#: ../../source/how-to-implement-fedbn.rst:99 +msgid "Next Steps" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-implement-fedbn.rst:101 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:83 -msgid "`Let's Encrypt `_" -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:84 -msgid "`certbot `_" +"The example is of course over-simplified since all clients load the exact" +" same dataset. This isn't realistic. You now have the tools to explore " +"this topic further. How about using different subsets of CIFAR-10 on each" +" client? How about adding more clients?" 
msgstr "" #: ../../source/how-to-implement-strategies.rst:2 @@ -6241,7 +5913,6 @@ msgid "Install stable release" msgstr "" #: ../../source/how-to-install-flower.rst:14 -#: ../../source/how-to-upgrade-to-flower-next.rst:66 msgid "Using pip" msgstr "" @@ -6335,635 +6006,601 @@ msgid "" "should be installed with the ``simulation`` extra:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" +#: ../../source/how-to-run-simulations.rst:22 +msgid "Run simulations" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/how-to-run-simulations.rst:24 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"Simulating Federated Learning workloads is useful for a multitude of use " +"cases: you might want to run your workload on a large cohort of clients " +"without having to source, configure, and manage a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without going through " +"a complex setup process; you might want to validate your algorithm in " +"different scenarios at varying levels of data and system heterogeneity, " +"client availability, privacy budgets, etc. These are among some of the " +"use cases where simulating FL workloads makes sense." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:9 +#: ../../source/how-to-run-simulations.rst:33 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." 
-msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:13 -msgid "Downloads" +"Flower's ``Simulation Engine`` schedules, launches, and manages " +"|clientapp_link|_ instances. It does so through a ``Backend``, which " +"contains several workers (i.e., Python processes) that can execute a " +"``ClientApp`` by passing it a |context_link|_ and a |message_link|_. " +"These ``ClientApp`` objects are identical to those used by Flower's " +"`Deployment Engine `_, making " +"alternating between *simulation* and *deployment* an effortless process. " +"The execution of ``ClientApp`` objects through Flower's ``Simulation " +"Engine`` is:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:19 +#: ../../source/how-to-run-simulations.rst:41 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"**Resource-aware**: Each backend worker executing ``ClientApp``\\s gets " +"assigned a portion of the compute and memory on your system. You can " +"define these at the beginning of the simulation, allowing you to control " +"the degree of parallelism of your simulation. For a fixed total pool of " +"resources, the fewer the resources per backend worker, the more " +"``ClientApps`` can run concurrently on the same hardware." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:23 +#: ../../source/how-to-run-simulations.rst:46 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." 
-msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:26 -msgid "If you are on an M1 Mac, it should be:" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:33 -msgid "On the previous generation Intel Mac devices, it should be:" +"**Batchable**: When there are more ``ClientApps`` to execute than backend" +" workers, ``ClientApps`` are queued and executed as soon as resources are" +" freed. This means that ``ClientApps`` are typically executed in batches " +"of N, where N is the number of backend workers." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:40 +#: ../../source/how-to-run-simulations.rst:50 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"**Self-managed**: This means that you, as a user, do not need to launch " +"``ClientApps`` manually; instead, the ``Simulation Engine``'s internals " +"orchestrates the execution of all ``ClientApp``\\s." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:51 +#: ../../source/how-to-run-simulations.rst:53 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"**Ephemeral**: This means that a ``ClientApp`` is only materialized when " +"it is required by the application (e.g., to do `fit() `_). The object is destroyed afterward, " +"releasing the resources it was assigned and allowing other clients to " +"participate." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:67 +#: ../../source/how-to-run-simulations.rst:60 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"You can preserve the state (e.g., internal variables, parts of an ML " +"model, intermediate results) of a ``ClientApp`` by saving it to its " +"``Context``. 
Check the `Designing Stateful Clients `_ guide for a complete walkthrough." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:78 +#: ../../source/how-to-run-simulations.rst:65 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"The ``Simulation Engine`` delegates to a ``Backend`` the role of spawning" +" and managing ``ClientApps``. The default backend is the ``RayBackend``, " +"which uses `Ray `_, an open-source framework for " +"scalable Python workloads. In particular, each worker is an `Actor " +"`_ capable of " +"spawning a ``ClientApp`` given its ``Context`` and a ``Message`` to " +"process." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:94 -msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +#: ../../source/how-to-run-simulations.rst:73 +msgid "Launch your Flower simulation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:98 -msgid "Tracking metrics" +#: ../../source/how-to-run-simulations.rst:75 +msgid "" +"Running a simulation is straightforward; in fact, it is the default mode " +"of operation for |flwr_run_link|_. Therefore, running Flower simulations " +"primarily requires you to first define a ``ClientApp`` and a " +"``ServerApp``. A convenient way to generate a minimal but fully " +"functional Flower app is by means of the |flwr_new_link|_ command. There " +"are multiple templates to choose from. The example below uses the " +"``PyTorch`` template." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:100 +#: ../../source/how-to-run-simulations.rst:83 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"If you haven't already, install Flower via ``pip install -U flwr`` in a " +"Python environment." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-run-simulations.rst:91 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"Then, follow the instructions shown after completing the |flwr_new_link|_" +" command. When you execute |flwr_run_link|_, you'll be using the " +"``Simulation Engine``." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "Now, you are ready to start your workload." +#: ../../source/how-to-run-simulations.rst:94 +msgid "" +"If we take a look at the ``pyproject.toml`` that was generated from the " +"|flwr_new_link|_ command (and loaded upon |flwr_run_link|_ execution), we" +" see that a *default* federation is defined. It sets the number of " +"supernodes to 10." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-run-simulations.rst:106 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"You can modify the size of your simulations by adjusting ``options.num-" +"supernodes``." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:127 -msgid "You can look at everything at http://127.0.0.1:8265 ." +#: ../../source/how-to-run-simulations.rst:109 +msgid "Simulation examples" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:129 +#: ../../source/how-to-run-simulations.rst:111 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +"In addition to the quickstart tutorials in the documentation (e.g., " +"`quickstart PyTorch Tutorial `_, " +"`quickstart JAX Tutorial `_), most examples" +" in the Flower repository are simulation-ready." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-run-simulations.rst:116 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. 
Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"`Quickstart TensorFlow/Keras " +"`_." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:137 +#: ../../source/how-to-run-simulations.rst:118 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port ``3000`` on your" -" machine as long as they are running." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:147 -msgid "Resource allocation" +"`Quickstart PyTorch `_" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:149 +#: ../../source/how-to-run-simulations.rst:120 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +"`Advanced PyTorch `_" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:152 +#: ../../source/how-to-run-simulations.rst:122 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"`Quickstart MLX `_" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:164 -msgid "In Google Colab, the result you see might be similar to this:" +#: ../../source/how-to-run-simulations.rst:123 +msgid "" +"`ViT fine-tuning `_" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-run-simulations.rst:125 msgid "" -"However, you can overwrite the defaults. 
When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"The complete list of examples can be found in `the Flower GitHub " +"`_." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:195 -msgid "Let’s also specify the resource for a single client." +#: ../../source/how-to-run-simulations.rst:131 +msgid "Defining ``ClientApp`` resources" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:225 +#: ../../source/how-to-run-simulations.rst:133 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"By default, the ``Simulation Engine`` assigns two CPU cores to each " +"backend worker. This means that if your system has 10 CPU cores, five " +"backend workers can be running in parallel, each executing a different " +"``ClientApp`` instance." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-run-simulations.rst:137 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " -"two clients and therefore enable them to run concurrently. Be careful not" -" to require more resources than available. If you specified " -"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" -" GPUs but decided to set 1 in ``ray_init_args``)." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:237 -msgid "Q: I don't see any metrics logged." +"More often than not, you would probably like to adjust the resources your" +" ``ClientApp`` gets assigned based on the complexity (i.e., compute and " +"memory footprint) of your workload. You can do so by adjusting the " +"backend resources for your federation." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:239 +#: ../../source/how-to-run-simulations.rst:143 +#, python-format msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +"Note that the resources the backend assigns to each worker (and hence to " +"each ``ClientApp`` being executed) are assigned in a *soft* manner. This " +"means that the resources are primarily taken into account in order to " +"control the degree of parallelism at which ``ClientApp`` instances should" +" be executed. Resource assignment is **not strict**, meaning that if you " +"specified your ``ClientApp`` is assumed to make use of 25% of the " +"available VRAM but it ends up using 50%, it might cause other " +"``ClientApp`` instances to crash throwing an out-of-memory (OOM) error." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:243 +#: ../../source/how-to-run-simulations.rst:151 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"Customizing resources can be done directly in the ``pyproject.toml`` of " +"your app." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:246 +#: ../../source/how-to-run-simulations.rst:160 msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +"With the above backend settings, your simulation will run as many " +"``ClientApps`` in parallel as CPUs you have in your system. GPU resources" +" for your ``ClientApp`` can be assigned by specifying the **ratio** of " +"VRAM each should make use of." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:252 +#: ../../source/how-to-run-simulations.rst:173 msgid "" -"Q: I see \"This site can't be reached\" when going to " -"http://127.0.0.1:8265." 
+"If you are using TensorFlow, you need to `enable memory growth " +"`_ so " +"multiple ``ClientApp`` instances can share a GPU. This needs to be done " +"before launching the simulation. To do so, set the environment variable " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"``." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:254 +#: ../../source/how-to-run-simulations.rst:179 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:257 -msgid "Resources" +"Let's see how the above configuration results in a different number of " +"``ClientApps`` running in parallel depending on the resources available " +"in your system. If your system has:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:259 +#: ../../source/how-to-run-simulations.rst:183 +#, python-format msgid "" -"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" -"started.html" +"10x CPUs and 1x GPU: at most 4 ``ClientApps`` will run in parallel since " +"each requires 25% of the available VRAM." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:261 -msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" -msgstr "" - -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" +#: ../../source/how-to-run-simulations.rst:185 +msgid "" +"10x CPUs and 2x GPUs: at most 8 ``ClientApps`` will run in parallel " +"(VRAM-limited)." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/how-to-run-simulations.rst:186 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"6x CPUs and 4x GPUs: at most 6 ``ClientApps`` will run in parallel (CPU-" +"limited)." msgstr "" -#: ../../source/how-to-run-simulations.rst:19 +#: ../../source/how-to-run-simulations.rst:187 msgid "" -"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " -"clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -"``VirtualClientEngine`` are:" +"10x CPUs but 0x GPUs: you won't be able to run the simulation since not " +"even the resources for a single ``ClientApp`` can be met." msgstr "" -#: ../../source/how-to-run-simulations.rst:26 +#: ../../source/how-to-run-simulations.rst:190 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. 
You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"A generalization of this is given by the following equation. It gives the" +" maximum number of ``ClientApps`` that can be executed in parallel on " +"available CPU cores (SYS_CPUS) and VRAM (SYS_GPUS)." msgstr "" -#: ../../source/how-to-run-simulations.rst:31 +#: ../../source/how-to-run-simulations.rst:194 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to ``VirtualClientEngine``'s " -"internals." +"N = \\min\\left(\\left\\lfloor \\frac{\\text{SYS_CPUS}}{\\text{num_cpus}}" +" \\right\\rfloor, \\left\\lfloor " +"\\frac{\\text{SYS_GPUS}}{\\text{num_gpus}} \\right\\rfloor\\right)" msgstr "" -#: ../../source/how-to-run-simulations.rst:33 +#: ../../source/how-to-run-simulations.rst:198 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"Both ``num_cpus`` (an integer higher than 1) and ``num_gpus`` (a non-" +"negative real number) should be set on a per ``ClientApp`` basis. If, for" +" example, you want only a single ``ClientApp`` to run on each GPU, then " +"set ``num_gpus=1.0``. If, for example, a ``ClientApp`` requires access to" +" two whole GPUs, you'd set ``num_gpus=2``." msgstr "" -#: ../../source/how-to-run-simulations.rst:38 +#: ../../source/how-to-run-simulations.rst:203 msgid "" -"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. 
In particular, Flower's ``VirtualClientEngine`` makes use of " -"`Actors `_ to spawn " -"`virtual` clients and run their workload." +"While the ``options.backend.client-resources`` can be used to control the" +" degree of concurrency in your simulations, this does not stop you from " +"running hundreds or even thousands of clients in the same round and " +"having orders of magnitude more *dormant* (i.e., not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The ``Simulation " +"Engine`` will schedule 100 ``ClientApps`` to run and then will execute " +"them in a resource-aware manner in batches of 8." msgstr "" -#: ../../source/how-to-run-simulations.rst:45 -msgid "Launch your Flower simulation" +#: ../../source/how-to-run-simulations.rst:212 +msgid "Simulation Engine resources" msgstr "" -#: ../../source/how-to-run-simulations.rst:47 +#: ../../source/how-to-run-simulations.rst:214 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"By default, the ``Simulation Engine`` has **access to all system " +"resources** (i.e., all CPUs, all GPUs). However, in some settings, you " +"might want to limit how many of your system resources are used for " +"simulation. You can do this in the ``pyproject.toml`` of your app by " +"setting the ``options.backend.init_args`` variable." msgstr "" -#: ../../source/how-to-run-simulations.rst:73 -msgid "VirtualClientEngine resources" +#: ../../source/how-to-run-simulations.rst:228 +msgid "" +"With the above setup, the Backend will be initialized with a single CPU " +"and GPU. 
Therefore, even if more CPUs and GPUs are available in your " +"system, they will not be used for the simulation. The example above " +"results in a single ``ClientApp`` running at any given point." msgstr "" -#: ../../source/how-to-run-simulations.rst:75 +#: ../../source/how-to-run-simulations.rst:233 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -"``ray_init_args`` input argument to ``start_simulation`` which the VCE " -"internally passes to Ray's ``ray.init`` command. For a complete list of " -"settings you can configure check the `ray.init " +"For a complete list of settings you can configure, check the `ray.init " "`_" -" documentation. Do not set ``ray_init_args`` if you want the VCE to use " -"all your system's CPUs and GPUs." +" documentation." msgstr "" -#: ../../source/how-to-run-simulations.rst:97 -msgid "Assigning client resources" +#: ../../source/how-to-run-simulations.rst:236 +msgid "For the highest performance, do not set ``options.backend.init_args``." msgstr "" -#: ../../source/how-to-run-simulations.rst:99 -msgid "" -"By default the ``VirtualClientEngine`` assigns a single CPU core (and " -"nothing else) to each virtual client. This means that if your system has " -"10 cores, that many virtual clients can be concurrently running." +#: ../../source/how-to-run-simulations.rst:239 +msgid "Simulation in Colab/Jupyter" msgstr "" -#: ../../source/how-to-run-simulations.rst:103 +#: ../../source/how-to-run-simulations.rst:241 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. 
You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"The preferred way of running simulations should always be " +"|flwr_run_link|_. However, the core functionality of the ``Simulation " +"Engine`` can be used from within a Google Colab or Jupyter environment by" +" means of `run_simulation `_." msgstr "" -#: ../../source/how-to-run-simulations.rst:110 -msgid "``num_cpus`` indicates the number of CPU cores a client would get." +#: ../../source/how-to-run-simulations.rst:262 +msgid "" +"With ``run_simulation``, you can also control the amount of resources for" +" your ``ClientApp`` instances. Do so by setting ``backend_config``. If " +"unset, the default resources are assigned (i.e., 2xCPUs per ``ClientApp``" +" and no GPU)." msgstr "" -#: ../../source/how-to-run-simulations.rst:111 -msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." +#: ../../source/how-to-run-simulations.rst:273 +msgid "" +"Refer to the `30 minutes Federated AI Tutorial " +"`_ for a complete example on how to " +"run Flower Simulations in Colab." msgstr "" -#: ../../source/how-to-run-simulations.rst:113 -msgid "Let's see a few examples:" +#: ../../source/how-to-run-simulations.rst:280 +msgid "Multi-node Flower simulations" msgstr "" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:282 msgid "" -"While the ``client_resources`` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. 
The " -"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " -"client sampled by the strategy) and then will execute them in a resource-" -"aware manner in batches of 8." +"Flower's ``Simulation Engine`` allows you to run FL simulations across " +"multiple compute nodes so that you're not restricted to running " +"simulations on a _single_ machine. Before starting your multi-node " +"simulation, ensure that you:" msgstr "" -#: ../../source/how-to-run-simulations.rst:140 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +#: ../../source/how-to-run-simulations.rst:286 msgid "Have the same Python environment on all nodes." msgstr "" -#: ../../source/how-to-run-simulations.rst:145 -msgid "Simulation examples" +#: ../../source/how-to-run-simulations.rst:287 msgid "Have a copy of your code on all nodes." msgstr "" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:288 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"Have a copy of your dataset on all nodes. If you are using partitions " +"from `Flower Datasets `_, ensure the " +"partitioning strategy and its parameterization are the same. The expectation " +"is that the i-th dataset partition is identical in all nodes." msgstr "" -#: ../../source/how-to-run-simulations.rst:151 +#: ../../source/how-to-run-simulations.rst:292 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +"Start Ray on your head node: on the terminal, type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node."
msgstr "" -#: ../../source/how-to-run-simulations.rst:154 +#: ../../source/how-to-run-simulations.rst:295 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on the terminal of a new node (before " +"executing |flwr_run_link|_). For example: ``ray start " +"--address='192.168.1.132:6379'``. Note that to be able to attach nodes to" +" the head node they should be discoverable by each other." msgstr "" -#: ../../source/how-to-run-simulations.rst:159 -msgid "Multi-node Flower simulations" +#: ../../source/how-to-run-simulations.rst:300 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation were running on a single node. In other words:" msgstr "" -#: ../../source/how-to-run-simulations.rst:161 +#: ../../source/how-to-run-simulations.rst:308 msgid "" -"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " -"multiple compute nodes. Before starting your multi-node simulation ensure" -" that you:" +"Once your simulation is finished, if you'd like to dismantle your " +"cluster, you simply need to run the command ``ray stop`` in each node's " +"terminal (including the head node)." msgstr "" -#: ../../source/how-to-run-simulations.rst:164 -msgid "Have the same Python environment in all nodes." +#: ../../source/how-to-run-simulations.rst:313 +msgid "" +"When attaching a new node to the head, all its resources (i.e., all CPUs," +" all GPUs) will be visible by the head node. This means that the " +"``Simulation Engine`` can schedule as many ``ClientApp`` instances as " +"that node can possibly run. In some settings, you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"``--num-cpus=`` and/or ``--num-" +"gpus=`` in any ``ray start`` command (including when " +"starting the head)." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:165 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#: ../../source/how-to-run-simulations.rst:322 +msgid "FAQ for Simulations" msgstr "" -#: ../../source/how-to-run-simulations.rst:166 -msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +#: ../../source/how-to-run-simulations.rst +msgid "Can I make my ``ClientApp`` instances stateful?" msgstr "" -#: ../../source/how-to-run-simulations.rst:168 +#: ../../source/how-to-run-simulations.rst:326 msgid "" -"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " -"``VirtualClientEngine`` attaches to a running Ray instance." +"Yes. Use the ``state`` attribute of the |context_link|_ object that is " +"passed to the ``ClientApp`` to save variables, parameters, or results to " +"it. Read the `Designing Stateful Clients `_ guide for a complete walkthrough." msgstr "" -#: ../../source/how-to-run-simulations.rst:171 -msgid "" -"Start Ray on you head node: on the terminal type ``ray start --head``. " -"This command will print a few lines, one of which indicates how to attach" -" other nodes to the head node." +#: ../../source/how-to-run-simulations.rst +msgid "Can I run multiple simulations on the same machine?" msgstr "" -#: ../../source/how-to-run-simulations.rst:174 +#: ../../source/how-to-run-simulations.rst:330 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -"``ray start --address='192.168.1.132:6379'``" +"Yes, but bear in mind that each simulation isn't aware of the resource " +"usage of the other. If your simulations make use of GPUs, consider " +"setting the ``CUDA_VISIBLE_DEVICES`` environment variable to make each " +"simulation use a different set of the available GPUs. Export such an " +"environment variable before starting |flwr_run_link|_." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:178 +#: ../../source/how-to-run-simulations.rst msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"Do the CPU/GPU resources set for each ``ClientApp`` restrict how much " +"compute/memory these make use of?" msgstr "" -#: ../../source/how-to-run-simulations.rst:181 +#: ../../source/how-to-run-simulations.rst:334 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command ``ray stop`` in each node's terminal " -"(including the head node)." +"No. These resources are exclusively used by the simulation backend to " +"control how many workers can be created on startup. Let's say N backend " +"workers are launched, then at most N ``ClientApp`` instances will be " +"running in parallel. It is your responsibility to ensure ``ClientApp`` " +"instances have enough resources to execute their workload (e.g., fine-" +"tune a transformer model)." msgstr "" -#: ../../source/how-to-run-simulations.rst:185 -msgid "Multi-node simulation good-to-know" -msgstr "" - -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +#: ../../source/how-to-run-simulations.rst +msgid "My ``ClientApp`` is triggering OOM on my GPU. What should I do?" msgstr "" -#: ../../source/how-to-run-simulations.rst:189 +#: ../../source/how-to-run-simulations.rst:338 msgid "" -"User ``ray status`` to check all nodes connected to your head node as " -"well as the total resources available to the ``VirtualClientEngine``." +"It is likely that your `num_gpus` setting, which controls the number of " +"``ClientApp`` instances that can share a GPU, is too low (meaning too " +"many ``ClientApps`` share the same GPU). 
Try the following:" msgstr "" -#: ../../source/how-to-run-simulations.rst:192 +#: ../../source/how-to-run-simulations.rst:340 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -"``VirtualClientEngine`` can schedule as many `virtual` clients as that " -"node can possible run. In some settings you might want to exclude certain" -" resources from the simulation. You can do this by appending `--num-" -"cpus=` and/or `--num-gpus=` in " -"any ``ray start`` command (including when starting the head)" -msgstr "" - -#: ../../source/how-to-run-simulations.rst:202 -msgid "Considerations for simulations" +"Set your ``num_gpus=1``. This will make a single ``ClientApp`` run on a " +"GPU." msgstr "" -#: ../../source/how-to-run-simulations.rst:206 -msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +#: ../../source/how-to-run-simulations.rst:341 +msgid "Inspect how much VRAM is being used (use ``nvidia-smi`` for this)." msgstr "" -#: ../../source/how-to-run-simulations.rst:209 +#: ../../source/how-to-run-simulations.rst:342 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"Based on the VRAM you see your single ``ClientApp`` using, calculate how " +"many more would fit within the remaining VRAM. One divided by the total " +"number of ``ClientApps`` is the ``num_gpus`` value you should set." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:217 -msgid "GPU resources" +#: ../../source/how-to-run-simulations.rst:344 +msgid "Refer to :ref:`clientappresources` for more details." msgstr "" -#: ../../source/how-to-run-simulations.rst:219 +#: ../../source/how-to-run-simulations.rst:346 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -"``num_gpus`` in ``client_resources``. This being said, Ray (used " -"internally by the VCE) is by default:" +"If your ``ClientApp`` is using TensorFlow, make sure you are exporting " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"`` before starting your simulation. For " +"more details, check." msgstr "" -#: ../../source/how-to-run-simulations.rst:222 +#: ../../source/how-to-run-simulations.rst msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set ``num_gpus=0.5`` and you have two GPUs in your system with different" -" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " -"concurrently." +"How do I know what's the right ``num_cpus`` and ``num_gpus`` for my " +"``ClientApp``?" msgstr "" -#: ../../source/how-to-run-simulations.rst:225 +#: ../../source/how-to-run-simulations.rst:350 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"A good practice is to start by running the simulation for a few rounds " +"with higher ``num_cpus`` and ``num_gpus`` than what is really needed " +"(e.g., ``num_cpus=8`` and, if you have a GPU, ``num_gpus=1``). Then " +"monitor your CPU and GPU utilization. For this, you can make use of tools" +" such as ``htop`` and ``nvidia-smi``. If you see overall resource " +"utilization remains low, try lowering ``num_cpus`` and ``num_gpus`` " +"(recall this will make more ``ClientApp`` instances run in parallel) " +"until you see a satisfactory system resource utilization." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:228 +#: ../../source/how-to-run-simulations.rst:352 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"Note that if the workload on your ``ClientApp`` instances is not " +"homogeneous (i.e., some come with a larger compute or memory footprint), " +"you'd probably want to focus on those when coming up with a good value " +"for ``num_gpus`` and ``num_cpus``." msgstr "" -#: ../../source/how-to-run-simulations.rst:231 -msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." +#: ../../source/how-to-run-simulations.rst +msgid "Can I assign different resources to each ``ClientApp`` instance?" msgstr "" -#: ../../source/how-to-run-simulations.rst:235 +#: ../../source/how-to-run-simulations.rst:356 msgid "" -"In addition, the GPU resource limits passed to ``client_resources`` are " -"not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:240 -msgid "TensorFlow with GPUs" +"No. All ``ClientApp`` objects are assumed to make use of the same " +"``num_cpus`` and ``num_gpus``. When setting these values (refer to " +":ref:`clientappresources` for more details), ensure the ``ClientApp`` " +"with the largest memory footprint (either RAM or VRAM) can run in your " +"system with others like it in parallel." msgstr "" -#: ../../source/how-to-run-simulations.rst:242 +#: ../../source/how-to-run-simulations.rst msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. 
However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"Can I run single simulation accross multiple compute nodes (e.g. GPU " +"servers)?" msgstr "" -#: ../../source/how-to-run-simulations.rst:249 +#: ../../source/how-to-run-simulations.rst:360 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " -"order to specify a function to be executed upon actor initialization. In " -"this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"Yes. If you are using the ``RayBackend`` (the *default* backend) you can " +"first interconnect your nodes through Ray's cli and then launch the " +"simulation. Refer to :ref:`multinodesimulations` for a step-by-step " +"guide." msgstr "" -#: ../../source/how-to-run-simulations.rst:272 +#: ../../source/how-to-run-simulations.rst msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"My ``ServerApp`` also needs to make use of the GPU (e.g., to do " +"evaluation of the *global model* after aggregation). Is this GPU usage " +"taken into account by the ``Simulation Engine``?" msgstr "" -#: ../../source/how-to-run-simulations.rst:276 -msgid "Multi-node setups" +#: ../../source/how-to-run-simulations.rst:364 +msgid "" +"No. The ``Simulation Engine`` only manages ``ClientApps`` and therefore " +"is only aware of the system resources they require. If your ``ServerApp``" +" makes use of substantial compute or memory resources, factor that into " +"account when setting ``num_cpus`` and ``num_gpus``." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:278 +#: ../../source/how-to-run-simulations.rst msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"Can I indicate on what resource a specific instance of a ``ClientApp`` " +"should run? Can I do resource placement?" msgstr "" -#: ../../source/how-to-run-simulations.rst:286 +#: ../../source/how-to-run-simulations.rst:368 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Currently, the placement of ``ClientApp`` instances is managed by the " +"``RayBackend`` (the only backend available as of ``flwr==1.13.0``) and " +"cannot be customized. Implementing a *custom* backend would be a way of " +"achieving resource placement." 
msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" +msgid "Save and Load Model Checkpoints" msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:4 @@ -6974,7 +6611,7 @@ msgid "" msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" +msgid "Model Checkpointing" msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 @@ -6990,11 +6627,11 @@ msgid "" "weights to the caller (i.e., the server):" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 -msgid "Save and load PyTorch checkpoints" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:58 +msgid "Save and Load PyTorch Checkpoints" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:60 msgid "" "Similar to the previous example but with a few extra steps, we'll show " "how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " @@ -7004,24 +6641,45 @@ msgid "" " class structure." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:103 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" " latest one:" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:116 msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:119 +msgid "" +"Alternatively, we can save and load the model updates during evaluation " +"phase by overriding ``evaluate()`` or ``aggregate_evaluate()`` method of " +"the strategy (``FedAvg``). 
Checkout the details in `Advanced PyTorch " +"Example `_ and `Advanced TensorFlow Example " +"`_." +msgstr "" + #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:6 +msgid "" +"This guide is for users who have already worked with Flower 0.x and want " +"to upgrade to Flower 1.0. Newer versions of Flower (1.13 and later) are " +"based on a new architecture and not covered in this guide. After " +"upgrading Flower 0.x projects to Flower 1.0, please refer to " +":doc:`Upgrade to Flower 1.13 ` to make " +"your project compatible with the lastest version of Flower." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:13 msgid "" "Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" " foundation for future growth. Compared to Flower 0.19 (and other 0.x " @@ -7029,129 +6687,129 @@ msgid "" " to change the code of existing 0.x-series projects." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 -#: ../../source/how-to-upgrade-to-flower-next.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:49 msgid "Install update" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:21 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "pip: add ``-U`` when installing." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:25 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " "before running ``poetry install``)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:32 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 -#: ../../source/how-to-upgrade-to-flower-next.rst:121 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:88 msgid "Required changes" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "The following breaking changes require manual updates." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:42 msgid "General" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:44 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). 
" "Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " "``start_simulation``. 
Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:70 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:72 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. Distributed evaluation on all clients can be enabled by" @@ -7159,19 +6817,19 @@ msgid "" "last round of training." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:75 msgid "Rename parameter/ndarray conversion functions:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:78 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -7181,51 +6839,51 @@ msgid "" "FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:92 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " "``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:95 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:97 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:99 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "Custom strategies" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -7234,35 +6892,35 @@ msgid "" "``aggregate_evaluate``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:112 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:118 msgid "Optional improvements" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:120 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " @@ -7270,19 +6928,19 @@ msgid "" "necessary." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:126 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " "round_timeout=600.0), ...)``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 -#: ../../source/how-to-upgrade-to-flower-next.rst:349 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:130 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:451 msgid "Further help" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:132 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -7291,186 +6949,245 @@ msgid "" "`_ and use the channel ``#questions``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:2 +msgid "Upgrade to Flower 1.13" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:4 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " +"Welcome to the migration guide for updating Flower to Flower 1.13! 
" "Whether you're a seasoned user or just getting started, this guide will " "help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +" latest features and improvements in Flower 1.13." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:11 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:10 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"This guide shows how to make pre-``1.13`` Flower code compatible with " +"Flower 1.13 (and later) with only minimal code changes." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:13 msgid "Let's dive in!" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:51 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " +"Here's how to update an existing installation of Flower to Flower 1.13 " "with ``pip``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:74 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:57 +msgid "or if you need Flower 1.13 with simulation:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:63 msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:73 msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:101 -msgid "Using Poetry" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:90 +msgid "" +"Starting with Flower 1.8, the 
*infrastructure* and *application layers* " +"have been decoupled. Flower 1.13 enforces this separation further. Among " +"other things, this allows you to run the exact same code in a simulation " +"as in a real deployment." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:103 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:94 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"Instead of starting a client in code via ``start_client()``, you create a" +" |clientapp_link|_. Instead of starting a server in code via " +"``start_server()``, you create a |serverapp_link|_. Both ``ClientApp`` " +"and ``ServerApp`` are started by the long-running components of the " +"server and client: the `SuperLink` and `SuperNode`, respectively." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:106 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:102 msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" +"For more details on SuperLink and SuperNode, please see the " +"|flower_architecture_link|_ ." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:105 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. 
The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"The following non-breaking changes require manual updates and allow you " +"to run your project both in the traditional (now deprecated) way and in " +"the new (recommended) Flower 1.13 way:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:110 msgid "|clientapp_link|_" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:134 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:112 msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"via ``start_client()``. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:146 msgid "|serverapp_link|_" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:159 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:148 msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"the server via ``start_server()``. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:180 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:185 msgid "Deployment" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:182 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:187 +msgid "" +"In a terminal window, start the SuperLink using |flower_superlink_link|_." +" Then, in two additional terminal windows, start two SuperNodes using " +"|flower_supernode_link|_ (2x). There is no need to directly run " +"``client.py`` and ``server.py`` as Python scripts." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:190 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"Here's an example to start the server without HTTPS (insecure mode, only " +"for prototyping):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:185 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:195 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"For a comprehensive walk-through on how to deploy Flower using Docker, " +"please refer to the :doc:`docker/index` guide." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:218 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"Here's another example to start both SuperLink and SuperNodes with HTTPS." +" Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile``" +" command line options to pass paths to (CA certificate, server " +"certificate, and server private key)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:229 -msgid "Simulation in CLI" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:246 +msgid "Simulation (CLI)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:231 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:248 msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"|serverapp_link|_, respectively. There is no need to use " +"``start_simulation()`` anymore. 
Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:253 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:389 +msgid "" +"For a comprehensive guide on how to setup and run Flower simulations " +"please read the |flower_how_to_run_simulations_link|_ guide." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:310 +msgid "Depending on your Flower version, you can run your simulation as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:312 +msgid "" +"For Flower 1.11 and later, run ``flwr run`` in the terminal. This is the " +"recommended way to start simulations, other ways are deprecated and no " +"longer recommended." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:264 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:314 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"DEPRECATED For Flower versions between 1.8 and 1.10, run ``flower-" +"simulation`` in the terminal and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +" In the code snippet below, there is an example (assuming the " +"``server_app`` and ``client_app`` objects are in a ``sim.py`` module)." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:318 +msgid "DEPRECATED For Flower versions before 1.8, run the Python script directly." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:337 +msgid "" +"Depending on your Flower version, you can also define the default " +"resources as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:339 +msgid "" +"For Flower 1.11 and later, you can edit your ``pyproject.toml`` file and " +"then run ``flwr run`` in the terminal as shown in the example below." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:341 +msgid "" +"DEPRECATED For Flower versions between 1.8 and 1.10, you can adjust the " +"resources for each |clientapp_link|_ using the ``--backend-config`` " +"command line argument instead of setting the ``client_resources`` " +"argument in ``start_simulation()``." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:344 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:384 +msgid "" +"DEPRECATED For Flower versions before 1.8, you need to run " +"``start_simulation()`` and pass a dictionary of the required resources to" +" the ``client_resources`` argument." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:281 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:375 +msgid "Simulation (Notebook)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:377 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"To run your simulation from within a notebook, please consider the " +"following examples depending on your Flower version:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:305 -msgid "Simulation in a Notebook" +#: ../../source/how-to-upgrade-to-flower-1.13.rst:380 +msgid "" +"For Flower 1.11 and later, you need to run |runsim_link|_ in your " +"notebook instead of ``start_simulation()``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:307 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:382 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"DEPRECATED For Flower versions between 1.8 and 1.10, you need to run " +"|runsim_link|_ in your notebook instead of ``start_simulation()`` and " +"configure the resources." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:351 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:453 msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"Most official `Flower code examples `_ " +"are already updated to Flower 1.13 so they can serve as a reference for " +"using the Flower 1.13 API. If there are further questions, `join the " +"Flower Slack `_ (and use the channel " +"``#questions``) or post them on `Flower Discuss " +"`_ where you can find the community posting " +"and answering questions." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:460 msgid "Important" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:360 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:462 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"As we continuously enhance Flower at a rapid pace, we'll be periodically " +"updating this guide. Please feel free to share any feedback with us!" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:366 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:465 msgid "Happy migrating! 
🚀" msgstr "" @@ -7698,7 +7415,7 @@ msgid "" "side clipping:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:115 +#: ../../source/how-to-use-differential-privacy.rst:116 msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " @@ -7710,11 +7427,11 @@ msgstr "" msgid "local DP mod" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:126 msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:140 +#: ../../source/how-to-use-differential-privacy.rst:144 msgid "" "Please note that the order of mods, especially those that modify " "parameters, is important when using multiple modifiers. Typically, " @@ -7722,19 +7439,19 @@ msgid "" "parameters." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:145 +#: ../../source/how-to-use-differential-privacy.rst:149 msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:147 +#: ../../source/how-to-use-differential-privacy.rst:151 msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " "TensorFlow Privacy. For examples of using Flower with these engines, " "please refer to the Flower examples directory (`Opacus " "`_, `Tensorflow" -" Privacy `_)." +" Privacy `_)." 
msgstr "" #: ../../source/how-to-use-strategies.rst:2 @@ -7759,12 +7476,12 @@ msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "" #: ../../source/how-to-use-strategies.rst:11 -#: ../../source/how-to-use-strategies.rst:43 +#: ../../source/how-to-use-strategies.rst:66 msgid "Customize an existing strategy with callback functions" msgstr "" #: ../../source/how-to-use-strategies.rst:12 -#: ../../source/how-to-use-strategies.rst:99 +#: ../../source/how-to-use-strategies.rst:139 msgid "Implement a novel strategy" msgstr "" @@ -7774,64 +7491,78 @@ msgstr "" #: ../../source/how-to-use-strategies.rst:17 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Flower comes with a number of popular federated learning Strategies which" +" can be instantiated as follows:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:45 +msgid "" +"To make the ``ServerApp`` use this strategy, pass a ``server_fn`` " +"function to the ``ServerApp`` constructor. The ``server_fn`` function " +"should return a ``ServerAppComponents`` object that contains the strategy" +" instance and a ``ServerConfig`` instance." msgstr "" -#: ../../source/how-to-use-strategies.rst:27 +#: ../../source/how-to-use-strategies.rst:50 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the ``start_server`` function. It is usually recommended" -" to adjust a few parameters during instantiation:" +"Both ``Strategy`` and ``ServerConfig`` classes can be configured with " +"parameters. The ``Context`` object passed to ``server_fn`` contains the " +"values specified in the ``[tool.flwr.app.config]`` table in your " +"``pyproject.toml`` (a snippet is shown below). To access these values, " +"use ``context.run_config``." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:68 msgid "" -"Existing strategies provide several ways to customize their behaviour. " +"Existing strategies provide several ways to customize their behavior. " "Callback functions allow strategies to call user-provided code during " -"execution." +"execution. This approach enables you to modify the strategy's partial " +"behavior without rewriting the whole class from zero." msgstr "" -#: ../../source/how-to-use-strategies.rst:49 +#: ../../source/how-to-use-strategies.rst:73 msgid "Configuring client fit and client evaluate" msgstr "" -#: ../../source/how-to-use-strategies.rst:51 +#: ../../source/how-to-use-strategies.rst:75 msgid "" "The server can pass new configuration values to the client each round by " "providing a function to ``on_fit_config_fn``. The provided function will " "be called by the strategy and must return a dictionary of configuration " -"key values pairs that will be sent to the client. It must return a " +"key value pairs that will be sent to the client. It must return a " "dictionary of arbitrary configuration values ``client.fit`` and " "``client.evaluate`` functions during each round of federated learning." msgstr "" -#: ../../source/how-to-use-strategies.rst:84 +#: ../../source/how-to-use-strategies.rst:121 msgid "" "The ``on_fit_config_fn`` can be used to pass arbitrary configuration " -"values from server to client, and potentially change these values each " +"values from server to client and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " "the dictionary returned by the ``on_fit_config_fn`` in its own " -"``client.fit()`` function." +"``client.fit()`` function. 
And while the values can be also passed " +"directly via the context this function can be a place to implement finer " +"control over the `fit` behaviour that may not be achieved by the context," +" which sets fixed values." msgstr "" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:129 msgid "" "Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " "to customize the configuration sent to ``client.evaluate()``" msgstr "" -#: ../../source/how-to-use-strategies.rst:93 +#: ../../source/how-to-use-strategies.rst:133 msgid "Configuring server-side evaluation" msgstr "" -#: ../../source/how-to-use-strategies.rst:95 +#: ../../source/how-to-use-strategies.rst:135 msgid "" "Server-side evaluation can be enabled by passing an evaluation function " "to ``evaluate_fn``." msgstr "" -#: ../../source/how-to-use-strategies.rst:101 +#: ../../source/how-to-use-strategies.rst:141 msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " "the most flexibility. Read the `Implementing Strategies :1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:139::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:155 +#: ../../source/index.rst:148 msgid "Contributor docs" msgstr "" -#: ../../source/index.rst:157 +#: ../../source/index.rst:150 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -7998,10 +7725,15 @@ msgstr "" msgid "Flower CLI reference" msgstr "" -#: ../../source/ref-api-cli.rst:7 -msgid "flwr CLI" +#: ../../source/ref-api-cli.rst:5 +msgid "Basic Commands" msgstr "" +#: ../../source/ref-api-cli.rst:10 +#, fuzzy +msgid "``flwr`` CLI" +msgstr "``FLWR_VERSION``" + #: ../../flwr:1 msgid "flwr is the Flower command line interface." 
msgstr "" @@ -8087,7 +7819,7 @@ msgstr "" msgid "Arguments" msgstr "Argumento de compilação" -#: ../../flwr install:1 log:1 new:1 run:1 +#: ../../flwr install:1 log:1 ls:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "Argumento de compilação" @@ -8104,7 +7836,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log run +#: ../../flwr log ls run msgid "default" msgstr "" @@ -8129,6 +7861,32 @@ msgstr "" msgid "Name of the federation to run the app on" msgstr "" +#: ../../flwr ls:1 +msgid "List runs." +msgstr "" + +#: ../../flwr ls:1 +msgid "List all runs" +msgstr "" + +#: ../../flwr ls:1 run:1 +#, fuzzy +msgid "``False``" +msgstr "``FLWR_VERSION``" + +#: ../../flwr ls:1 +msgid "Specific run ID to display" +msgstr "" + +#: ../../flwr ls:1 +#, fuzzy +msgid "Path of the Flower project" +msgstr "O nome do repositório da imagem base." + +#: ../../flwr ls:1 +msgid "Name of the federation" +msgstr "" + #: ../../flwr new:1 msgid "Create new Flower App." msgstr "" @@ -8182,11 +7940,6 @@ msgid "" "default." msgstr "" -#: ../../flwr run:1 -#, fuzzy -msgid "``False``" -msgstr "``FLWR_VERSION``" - #: ../../flwr run:1 #, fuzzy msgid "Path of the Flower App to run." @@ -8196,33 +7949,62 @@ msgstr "O nome do repositório da imagem base." msgid "Name of the federation to run the app on." 
msgstr "" -#: ../../source/ref-api-cli.rst:16 -msgid "flower-simulation" +#: ../../source/ref-api-cli.rst:19 +#, fuzzy +msgid "``flower-superlink``" +msgstr "``FLWR_VERSION``" + +#: ../../source/ref-api-cli.rst:29 +#, fuzzy +msgid "``flower-supernode``" +msgstr "``FLWR_VERSION``" + +#: ../../source/ref-api-cli.rst:37 +msgid "Advanced Commands" msgstr "" -#: ../../source/ref-api-cli.rst:26 -msgid "flower-superlink" +#: ../../source/ref-api-cli.rst:42 +#, fuzzy +msgid "``flwr-serverapp``" +msgstr "``FLWR_VERSION``" + +#: ../../source/ref-api-cli.rst:52 +msgid "``flwr-clientapp``" msgstr "" -#: ../../source/ref-api-cli.rst:36 -msgid "flower-supernode" +#: ../../source/ref-api-cli.rst:60 +msgid "Technical Commands" msgstr "" -#: ../../source/ref-api-cli.rst:46 -msgid "flower-server-app" +#: ../../source/ref-api-cli.rst:65 +#, fuzzy +msgid "``flower-simulation``" +msgstr "``FLWR_VERSION``" + +#: ../../source/ref-api-cli.rst:73 +msgid "Deprecated Commands" msgstr "" -#: ../../source/ref-api-cli.rst:50 +#: ../../source/ref-api-cli.rst:78 +#, fuzzy +msgid "``flower-server-app``" +msgstr "Clone o repositório do flower." + +#: ../../source/ref-api-cli.rst:82 msgid "" -"Note that since version ``1.11.0``, ``flower-server-app`` no longer " -"supports passing a reference to a `ServerApp` attribute. Instead, you " -"need to pass the path to Flower app via the argument ``--app``. This is " -"the path to a directory containing a `pyproject.toml`. You can create a " -"valid Flower app by executing ``flwr new`` and following the prompt." +"Note that from version ``1.13.0``, ``flower-server-app`` is deprecated. " +"Instead, you only need to execute |flwr_run_link|_ to start the run." msgstr "" -#: ../../source/ref-api-cli.rst:64 -msgid "flower-superexec" +#: ../../source/ref-api-cli.rst:88 +#, fuzzy +msgid "``flower-superexec``" +msgstr "``FLWR_VERSION``" + +#: ../../source/ref-api-cli.rst:92 +msgid "" +"Note that from version ``1.13.0``, ``flower-superexec`` is deprecated. 
" +"Instead, you only need to execute |flower_superlink_link|_." msgstr "" #: ../../source/ref-api/flwr.rst:2 @@ -8308,6 +8090,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:24 #: ../../source/ref-api/flwr.server.strategy.rst:17 #: ../../source/ref-api/flwr.server.workflow.rst:17 +#: ../../source/ref-api/flwr.simulation.rst:26 msgid "Classes" msgstr "" @@ -8421,6 +8204,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:15 msgid "Methods" msgstr "" @@ -8521,7 +8305,7 @@ msgstr "" #: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.Driver.rst:43 #: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 @@ -8565,6 +8349,7 @@ msgstr "" #: flwr.server.driver.driver.Driver.pull_messages #: flwr.server.driver.driver.Driver.push_messages #: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.driver.driver.Driver.set_run #: flwr.server.serverapp_components.ServerAppComponents #: flwr.server.strategy.bulyan.Bulyan #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping @@ -8588,7 +8373,8 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.run_simulation.run_simulation of +#: flwr.simulation.run_simulation.run_simulation +#: 
flwr.simulation.simulationio_connection.SimulationIoConnection of msgid "Parameters" msgstr "" @@ -8606,6 +8392,7 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -8638,6 +8425,7 @@ msgstr "" #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -8692,10 +8480,6 @@ msgstr "" msgid "The current client properties." 
msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" -msgstr "" - #: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 #: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 #: flwr.common.context.Context:1 flwr.common.message.Error:1 @@ -8714,11 +8498,11 @@ msgstr "" #: flwr.server.serverapp_components.ServerAppComponents:1 #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.app.start_client:51 flwr.client.app.start_numpy_client:36 #: flwr.client.client_app.ClientApp:4 #: flwr.client.client_app.ClientApp.evaluate:4 #: flwr.client.client_app.ClientApp.query:4 @@ -8727,7 +8511,7 @@ msgstr "" #: flwr.common.record.configsrecord.ConfigsRecord:20 #: flwr.common.record.metricsrecord.MetricsRecord:19 #: flwr.common.record.parametersrecord.ParametersRecord:22 -#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:46 #: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 @@ -9173,24 +8957,30 @@ msgstr "" msgid "start\\_client" msgstr "" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: flwr.client.app.start_client:5 of +msgid "" +"This function is deprecated since 1.13.0. Use :code:`flower-supernode` " +"command instead to start a SuperNode." 
+msgstr "" + +#: flwr.client.app.start_client:8 flwr.client.app.start_numpy_client:9 of msgid "" "The IPv4 or IPv6 address of the server. If the Flower server runs on the " "same machine on port 8080, then `server_address` would be " "`\"[::]:8080\"`." msgstr "" -#: flwr.client.app.start_client:7 of +#: flwr.client.app.start_client:12 of msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: flwr.client.app.start_client:9 of +#: flwr.client.app.start_client:14 of msgid "" "An implementation of the abstract base class `flwr.client.Client` " "(default: None)" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: flwr.client.app.start_client:17 flwr.client.app.start_numpy_client:15 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " server. The default should be sufficient for most models. Users who " @@ -9200,49 +8990,57 @@ msgid "" "increased limit and block larger messages." msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: flwr.client.app.start_client:24 flwr.client.app.start_numpy_client:22 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " "to an SSL-enabled Flower server." msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: flwr.client.app.start_client:28 flwr.client.app.start_numpy_client:26 of msgid "" "Starts an insecure gRPC connection when True. Enables HTTPS connection " "when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: flwr.client.app.start_client:31 flwr.client.app.start_numpy_client:29 of msgid "" "Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " "bidirectional streaming - 'grpc-rere': gRPC, request-response " "(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.client.app.start_client:31 of +#: flwr.client.app.start_client:36 of +msgid "" +"Tuple containing the elliptic curve private key and public key for " +"authentication from the cryptography library. Source: " +"https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ Used " +"to establish an authenticated connection with the server." +msgstr "" + +#: flwr.client.app.start_client:41 of msgid "" "The maximum number of times the client will try to connect to the server " "before giving up in case of a connection error. If set to None, there is " "no limit to the number of tries." msgstr "" -#: flwr.client.app.start_client:35 of +#: flwr.client.app.start_client:45 of msgid "" "The maximum duration before the client stops trying to connect to the " "server in case of connection error. If set to None, there is no limit to " "the total time." 
msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:59 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:70 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" @@ -9415,8 +9213,8 @@ msgstr "" #: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`Context `\\ \\(node\\_id\\, " -"node\\_config\\, state\\, run\\_config\\)" +":py:obj:`Context `\\ \\(run\\_id\\, node\\_id\\, " +"node\\_config\\, state\\, ...\\)" msgstr "" #: ../../source/ref-api/flwr.common.rst:68::1 @@ -9936,19 +9734,23 @@ msgid "Context" msgstr "" #: flwr.common.context.Context:3 of -msgid "The ID that identifies the node." +msgid "The ID that identifies the run." msgstr "" #: flwr.common.context.Context:5 of +msgid "The ID that identifies the node." +msgstr "" + +#: flwr.common.context.Context:7 of msgid "" "A config (key/value mapping) unique to the node and independent of the " "`run_config`. This config persists across all runs this node participates" " in." msgstr "" -#: flwr.common.context.Context:8 of +#: flwr.common.context.Context:10 of msgid "" -"Holds records added by the entity in a given run and that will stay " +"Holds records added by the entity in a given `run_id` and that will stay " "local. This means that the data it holds will never leave the system it's" " running from. This can be used as an intermediate storage or scratchpad " "when executing mods. 
It can also be used as a memory to access at " @@ -9956,26 +9758,30 @@ msgid "" "multiple rounds)" msgstr "" -#: flwr.common.context.Context:15 of +#: flwr.common.context.Context:17 of msgid "" -"A config (key/value mapping) held by the entity in a given run and that " -"will stay local. It can be used at any point during the lifecycle of this" -" entity (e.g. across multiple rounds)" +"A config (key/value mapping) held by the entity in a given `run_id` and " +"that will stay local. It can be used at any point during the lifecycle of" +" this entity (e.g. across multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:32::1 +msgid ":py:obj:`run_id `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 msgid ":py:obj:`node_id `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 msgid ":py:obj:`node_config `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 msgid ":py:obj:`state `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 msgid ":py:obj:`run_config `\\" msgstr "" @@ -10533,18 +10339,6 @@ msgid "" "`\\" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" -msgstr "" - #: flwr.common.EventType.capitalize:1::1 of msgid "" ":py:obj:`CLI_FLOWER_SIMULATION_ENTER " @@ -11112,6 +10906,10 @@ msgstr "" msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" +#: flwr.common.message.Message.create_error_reply:12 of +msgid "**message** -- A Message containing only the relevant error and metadata." 
+msgstr "" + #: flwr.common.message.Message.create_reply:3 of msgid "" "The method generates a new `Message` as a reply to this message. It " @@ -11155,6 +10953,10 @@ msgstr "" msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "" + #: flwr.common.Metadata.created_at:1::1 #: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of msgid "An identifier for the current run." @@ -11713,7 +11515,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:37::1 #: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +msgid "Abstract base Driver class for the ServerAppIo API." msgstr "" #: ../../source/ref-api/flwr.server.rst:37::1 @@ -11881,6 +11683,10 @@ msgstr "" msgid "**num_available** -- The number of currently available clients." msgstr "" +#: flwr.server.client_manager.ClientManager.register:3 of +msgid "The ClientProxy of the Client to register." +msgstr "" + #: flwr.server.client_manager.ClientManager.register:6 #: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" @@ -11894,63 +11700,76 @@ msgstr "" msgid "This method is idempotent." msgstr "" +#: flwr.server.client_manager.ClientManager.unregister:5 of +msgid "The ClientProxy of the Client to unregister." +msgstr "" + #: ../../source/ref-api/flwr.server.Driver.rst:2 msgid "Driver" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`create_message `\\ " "\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.create_message:1 of msgid "Create a new message with specified parameters." 
msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.get_node_ids:1 of msgid "Get node IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`pull_messages `\\ " "\\(message\\_ids\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.pull_messages:1 of msgid "Pull messages based on message IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`push_messages `\\ " "\\(messages\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.push_messages:1 of msgid "Push messages to specified node IDs." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 msgid "" ":py:obj:`send_and_receive `\\ " "\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.send_and_receive:1 of msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +msgid ":py:obj:`set_run `\\ \\(run\\_id\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#: flwr.server.driver.driver.Driver.set_run:1 of +msgid "Request a run to the SuperLink with a given `run_id`." 
+msgstr "" + #: flwr.server.driver.driver.Driver.create_message:1::1 of msgid ":py:obj:`run `\\" msgstr "" @@ -12060,6 +11879,17 @@ msgid "" "which is not affected by `timeout`." msgstr "" +#: flwr.server.driver.driver.Driver.set_run:3 of +msgid "" +"If a Run with the specified `run_id` exists, a local Run object will be " +"created. It enables further functionality in the driver, such as sending " +"`Messages`." +msgstr "" + +#: flwr.server.driver.driver.Driver.set_run:7 of +msgid "The `run_id` of the Run this Driver object operates in." +msgstr "" + #: ../../source/ref-api/flwr.server.History.rst:2 msgid "History" msgstr "" @@ -12132,35 +11962,39 @@ msgstr "" msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`strategy `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`client_manager `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`history `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`node_id `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`node_config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`state `\\" msgstr "" -#: 
../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 msgid ":py:obj:`run_config `\\" msgstr "" @@ -12239,10 +12073,6 @@ msgstr "" msgid "Replace server strategy." msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" -msgstr "" - #: flwr.server.server_app.ServerApp:5 of msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "" @@ -12270,7 +12100,7 @@ msgid "" "thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.app.start_server:9 +#: flwr.server.app.start_server:14 #: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " @@ -12391,31 +12221,37 @@ msgstr "" msgid "start\\_server" msgstr "" -#: flwr.server.app.start_server:3 of +#: flwr.server.app.start_server:5 of +msgid "" +"This function is deprecated since 1.13.0. Use the :code:`flower-" +"superlink` command instead to start a SuperLink." +msgstr "" + +#: flwr.server.app.start_server:8 of msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.server.app.start_server:10 of msgid "" "A server implementation, either `flwr.server.Server` or a subclass " "thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.server.app.start_server:17 of msgid "" "An implementation of the abstract base class " "`flwr.server.strategy.Strategy`. If no strategy is provided, then " "`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.server.app.start_server:21 of msgid "" "An implementation of the abstract base class `flwr.server.ClientManager`." " If no implementation is provided, then `start_server` will use " "`flwr.server.client_manager.SimpleClientManager`." 
msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.server.app.start_server:26 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " clients. The default should be sufficient for most models. Users who " @@ -12425,7 +12261,7 @@ msgid "" "increased limit and block larger messages." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " @@ -12433,34 +12269,34 @@ msgid "" "server certificate. * server private key." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " "bytes elements in the following order:" msgstr "" -#: flwr.server.app.start_server:32 of +#: flwr.server.app.start_server:37 of msgid "CA certificate." msgstr "" -#: flwr.server.app.start_server:33 of +#: flwr.server.app.start_server:38 of msgid "server certificate." msgstr "" -#: flwr.server.app.start_server:34 of +#: flwr.server.app.start_server:39 of msgid "server private key." msgstr "" -#: flwr.server.app.start_server:37 of +#: flwr.server.app.start_server:42 of msgid "**hist** -- Object containing training and evaluation metrics." 
msgstr "" -#: flwr.server.app.start_server:42 of +#: flwr.server.app.start_server:47 of msgid "Starting an insecure server:" msgstr "" -#: flwr.server.app.start_server:46 of +#: flwr.server.app.start_server:51 of msgid "Starting an SSL-enabled server:" msgstr "" @@ -13747,7 +13583,7 @@ msgid "" msgstr "" #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:1231 +#: ../../source/ref-changelog.md:1434 msgid "FedAdagrad" msgstr "" @@ -15400,28 +15236,66 @@ msgstr "" msgid "simulation" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 msgid "" ":py:obj:`run_simulation `\\ " "\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.run_simulation.run_simulation:1 of msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 +msgid "" +":py:obj:`run_simulation_process " +"`\\ \\(...\\[\\, flwr\\_dir\\_\\," +" ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#: flwr.simulation.app.run_simulation_process:1 of +msgid "Run Flower Simulation process." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 msgid "" ":py:obj:`start_simulation `\\ " "\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.start_simulation:1 of msgid "Log error stating that module `ray` could not be imported." 
msgstr "" +#: ../../source/ref-api/flwr.simulation.rst:31::1 +msgid "" +":py:obj:`SimulationIoConnection " +"`\\ \\(\\[...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of +msgid "`SimulationIoConnection` provides an interface to the SimulationIo API." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:2 +msgid "SimulationIoConnection" +msgstr "" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:3 of +msgid "The address (URL, IPv6, IPv4) of the SuperLink SimulationIo API service." +msgstr "" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:5 of +msgid "" +"The PEM-encoded root certificates as a byte string. If provided, a secure" +" connection using the certificates will be established to an SSL-enabled " +"Flower server." +msgstr "" + #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 msgid "run\\_simulation" msgstr "" @@ -15476,6 +15350,10 @@ msgid "" "If enabled, DEBUG-level logs will be displayed." 
msgstr "" +#: ../../source/ref-api/flwr.simulation.run_simulation_process.rst:2 +msgid "run\\_simulation\\_process" +msgstr "" + #: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 msgid "start\\_simulation" msgstr "" @@ -15485,25 +15363,27 @@ msgid "Changelog" msgstr "" #: ../../source/ref-changelog.md:3 -msgid "v1.11.1 (2024-09-11)" +msgid "v1.13.1 (2024-11-26)" msgstr "" #: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 -#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 -#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 -#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 -#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 -#: ../../source/ref-changelog.md:940 +#: ../../source/ref-changelog.md:138 ../../source/ref-changelog.md:208 +#: ../../source/ref-changelog.md:240 ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:442 ../../source/ref-changelog.md:542 +#: ../../source/ref-changelog.md:606 ../../source/ref-changelog.md:699 +#: ../../source/ref-changelog.md:799 ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:1074 ../../source/ref-changelog.md:1143 msgid "Thanks to our contributors" msgstr "" #: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 -#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 -#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 -#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:140 ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:242 ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:444 
../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:608 ../../source/ref-changelog.md:701 +#: ../../source/ref-changelog.md:801 ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:949 ../../source/ref-changelog.md:1007 msgid "" "We would like to give our special thanks to all the contributors who made" " the new version of Flower possible (in `git shortlog` order):" @@ -15511,6738 +15391,6981 @@ msgstr "" #: ../../source/ref-changelog.md:9 msgid "" -"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " -"`Javier`, `Robert Steiner`, `Yan Gao` " -msgstr "" - -#: ../../source/ref-changelog.md:11 -msgid "Improvements" +"`Adam Narozniak`, `Charles Beauville`, `Heng Pan`, `Javier`, `Robert " +"Steiner` " +msgstr "" + +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:144 ../../source/ref-changelog.md:246 +#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:448 +#: ../../source/ref-changelog.md:548 ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:705 ../../source/ref-changelog.md:805 +#: ../../source/ref-changelog.md:889 ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:1011 ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1209 ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1318 ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1429 ../../source/ref-changelog.md:1468 +#: ../../source/ref-changelog.md:1501 ../../source/ref-changelog.md:1551 +msgid "What's new?" 
msgstr "" #: ../../source/ref-changelog.md:13 msgid "" -"**Implement** `keys/values/items` **methods for** `TypedDict` " -"([#4146](https://github.com/adap/flower/pull/4146))" +"**Fix `SimulationEngine` Executor for SuperLink** " +"([#4563](https://github.com/adap/flower/pull/4563), " +"[#4568](https://github.com/adap/flower/pull/4568), " +"[#4570](https://github.com/adap/flower/pull/4570))" msgstr "" #: ../../source/ref-changelog.md:15 msgid "" -"**Fix parsing of** `--executor-config` **if present** " -"([#4125](https://github.com/adap/flower/pull/4125))" +"Resolved an issue that prevented SuperLink from functioning correctly " +"when using the `SimulationEngine` executor." msgstr "" #: ../../source/ref-changelog.md:17 msgid "" -"**Adjust framework name in templates docstrings** " -"([#4127](https://github.com/adap/flower/pull/4127))" +"**Improve FAB build and install** " +"([#4571](https://github.com/adap/flower/pull/4571))" msgstr "" #: ../../source/ref-changelog.md:19 msgid "" -"**Update** `flwr new` **Hugging Face template** " -"([#4169](https://github.com/adap/flower/pull/4169))" +"An updated FAB build and install process produces smaller FAB files and " +"doesn't rely on `pip install` any more. It also resolves an issue where " +"all files were unnecessarily included in the FAB file. The `flwr` CLI " +"commands now correctly pack only the necessary files, such as `.md`, " +"`.toml` and `.py`, ensuring more efficient and accurate packaging." msgstr "" #: ../../source/ref-changelog.md:21 msgid "" -"**Fix** `flwr new` **FlowerTune template** " -"([#4123](https://github.com/adap/flower/pull/4123))" +"**Update** `embedded-devices` **example** " +"([#4381](https://github.com/adap/flower/pull/4381))" msgstr "" #: ../../source/ref-changelog.md:23 -msgid "" -"**Add buffer time after** `ServerApp` **thread initialization** " -"([#4119](https://github.com/adap/flower/pull/4119))" +msgid "The example now uses the `flwr run` command and the Deployment Engine." 
msgstr "" #: ../../source/ref-changelog.md:25 msgid "" -"**Handle unsuitable resources for simulation** " -"([#4143](https://github.com/adap/flower/pull/4143))" +"**Update Documentation** " +"([#4566](https://github.com/adap/flower/pull/4566), " +"[#4569](https://github.com/adap/flower/pull/4569), " +"[#4560](https://github.com/adap/flower/pull/4560), " +"[#4556](https://github.com/adap/flower/pull/4556), " +"[#4581](https://github.com/adap/flower/pull/4581), " +"[#4537](https://github.com/adap/flower/pull/4537), " +"[#4562](https://github.com/adap/flower/pull/4562), " +"[#4582](https://github.com/adap/flower/pull/4582))" msgstr "" #: ../../source/ref-changelog.md:27 msgid "" -"**Update example READMEs** " -"([#4117](https://github.com/adap/flower/pull/4117))" +"Enhanced documentation across various aspects, including updates to " +"translation workflows, Docker-related READMEs, and recommended datasets. " +"Improvements also include formatting fixes for dataset partitioning docs " +"and better references to resources in the datasets documentation index." 
msgstr "" #: ../../source/ref-changelog.md:29 msgid "" -"**Update SuperNode authentication docs** " -"([#4160](https://github.com/adap/flower/pull/4160))" +"**Update Infrastructure and CI/CD** " +"([#4577](https://github.com/adap/flower/pull/4577), " +"[#4578](https://github.com/adap/flower/pull/4578), " +"[#4558](https://github.com/adap/flower/pull/4558), " +"[#4551](https://github.com/adap/flower/pull/4551), " +"[#3356](https://github.com/adap/flower/pull/3356), " +"[#4559](https://github.com/adap/flower/pull/4559), " +"[#4575](https://github.com/adap/flower/pull/4575))" msgstr "" -#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 -#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 -#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 -#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 -#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 -#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 -#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 -#: ../../source/ref-changelog.md:1253 -msgid "Incompatible changes" +#: ../../source/ref-changelog.md:31 +msgid "" +"**General improvements** " +"([#4557](https://github.com/adap/flower/pull/4557), " +"[#4564](https://github.com/adap/flower/pull/4564), " +"[#4573](https://github.com/adap/flower/pull/4573), " +"[#4561](https://github.com/adap/flower/pull/4561), " +"[#4579](https://github.com/adap/flower/pull/4579), " +"[#4572](https://github.com/adap/flower/pull/4572))" +msgstr "" + +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:198 ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:408 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." 
msgstr "" #: ../../source/ref-changelog.md:35 -msgid "v1.11.0 (2024-08-30)" +msgid "v1.13.0 (2024-11-20)" msgstr "" #: ../../source/ref-changelog.md:41 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," -" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " -"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +"Beutel`, `Daniel Nata Nugraha`, `Dimitris Stripelis`, `Heng Pan`, " +"`Javier`, `Mohammad Naseri`, `Robert Steiner`, `Waris Gill`, `William " +"Lindskog`, `Yan Gao`, `Yao Xu`, `wwjang` " msgstr "" -#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 -#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 -#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 -#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 -#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 -#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 -#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 -#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 -#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 -#: ../../source/ref-changelog.md:1348 -msgid "What's new?" 
+#: ../../source/ref-changelog.md:45 +msgid "" +"**Introduce `flwr ls` command** " +"([#4460](https://github.com/adap/flower/pull/4460), " +"[#4459](https://github.com/adap/flower/pull/4459), " +"[#4477](https://github.com/adap/flower/pull/4477))" msgstr "" -#: ../../source/ref-changelog.md:45 +#: ../../source/ref-changelog.md:47 msgid "" -"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " -"([#4006](https://github.com/adap/flower/pull/4006), " -"[#3945](https://github.com/adap/flower/pull/3945), " -"[#3999](https://github.com/adap/flower/pull/3999), " -"[#4027](https://github.com/adap/flower/pull/4027), " -"[#3851](https://github.com/adap/flower/pull/3851), " -"[#3946](https://github.com/adap/flower/pull/3946), " -"[#4003](https://github.com/adap/flower/pull/4003), " -"[#4029](https://github.com/adap/flower/pull/4029), " -"[#3942](https://github.com/adap/flower/pull/3942), " -"[#3957](https://github.com/adap/flower/pull/3957), " -"[#4020](https://github.com/adap/flower/pull/4020), " -"[#4044](https://github.com/adap/flower/pull/4044), " -"[#3852](https://github.com/adap/flower/pull/3852), " -"[#4019](https://github.com/adap/flower/pull/4019), " -"[#4031](https://github.com/adap/flower/pull/4031), " -"[#4036](https://github.com/adap/flower/pull/4036), " -"[#4049](https://github.com/adap/flower/pull/4049), " -"[#4017](https://github.com/adap/flower/pull/4017), " -"[#3943](https://github.com/adap/flower/pull/3943), " -"[#3944](https://github.com/adap/flower/pull/3944), " -"[#4011](https://github.com/adap/flower/pull/4011), " -"[#3619](https://github.com/adap/flower/pull/3619))" -msgstr "" - -#: ../../source/ref-changelog.md:47 -msgid "" -"Dynamic code updates are here! `flwr run` can now ship and install the " -"latest version of your `ServerApp` and `ClientApp` to an already-running " -"federation (SuperLink and SuperNodes)." +"The `flwr ls` command is now available to display details about all runs " +"(or one specific run). 
It supports the following usage options:" msgstr "" #: ../../source/ref-changelog.md:49 -msgid "" -"How does it work? `flwr run` bundles your Flower app into a single FAB " -"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," -" to both the SuperLink and those SuperNodes that need it. This allows you" -" to keep SuperExec, SuperLink and SuperNodes running as permanent " -"infrastructure, and then ship code updates (including completely new " -"projects!) dynamically." +msgid "`flwr ls --runs [] []`: Lists all runs." msgstr "" -#: ../../source/ref-changelog.md:51 -msgid "`flwr run` is all you need." +#: ../../source/ref-changelog.md:50 +msgid "" +"`flwr ls --run-id [] []`: Displays details for " +"a specific run." msgstr "" -#: ../../source/ref-changelog.md:53 +#: ../../source/ref-changelog.md:52 msgid "" -"**Introduce isolated** `ClientApp` **execution** " -"([#3970](https://github.com/adap/flower/pull/3970), " -"[#3976](https://github.com/adap/flower/pull/3976), " -"[#4002](https://github.com/adap/flower/pull/4002), " -"[#4001](https://github.com/adap/flower/pull/4001), " -"[#4034](https://github.com/adap/flower/pull/4034), " -"[#4037](https://github.com/adap/flower/pull/4037), " -"[#3977](https://github.com/adap/flower/pull/3977), " -"[#4042](https://github.com/adap/flower/pull/4042), " -"[#3978](https://github.com/adap/flower/pull/3978), " -"[#4039](https://github.com/adap/flower/pull/4039), " -"[#4033](https://github.com/adap/flower/pull/4033), " -"[#3971](https://github.com/adap/flower/pull/3971), " -"[#4035](https://github.com/adap/flower/pull/4035), " -"[#3973](https://github.com/adap/flower/pull/3973), " -"[#4032](https://github.com/adap/flower/pull/4032))" +"This command provides information including the run ID, FAB ID and " +"version, run status, elapsed time, and timestamps for when the run was " +"created, started running, and finished." 
msgstr "" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-changelog.md:54 msgid "" -"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" -" enterprise deployment, this allows you to set strict limits on what the " -"`ClientApp` can and cannot do." +"**Fuse SuperLink and SuperExec** " +"([#4358](https://github.com/adap/flower/pull/4358), " +"[#4403](https://github.com/adap/flower/pull/4403), " +"[#4406](https://github.com/adap/flower/pull/4406), " +"[#4357](https://github.com/adap/flower/pull/4357), " +"[#4359](https://github.com/adap/flower/pull/4359), " +"[#4354](https://github.com/adap/flower/pull/4354), " +"[#4229](https://github.com/adap/flower/pull/4229), " +"[#4283](https://github.com/adap/flower/pull/4283), " +"[#4352](https://github.com/adap/flower/pull/4352))" msgstr "" -#: ../../source/ref-changelog.md:57 -msgid "`flower-supernode` supports three `--isolation` modes:" +#: ../../source/ref-changelog.md:56 +msgid "" +"SuperExec has been integrated into SuperLink, enabling SuperLink to " +"directly manage ServerApp processes (`flwr-serverapp`). The `flwr` CLI " +"now targets SuperLink's Exec API. Additionally, SuperLink introduces two " +"isolation modes for running ServerApps: `subprocess` (default) and " +"`process`, which can be specified using the `--isolation " +"{subprocess,process}` flag." msgstr "" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:58 msgid "" -"Unset: The SuperNode runs the `ClientApp` in the same process (as in " -"previous versions of Flower). This is the default mode." 
+"**Introduce `flwr-serverapp` command** " +"([#4394](https://github.com/adap/flower/pull/4394), " +"[#4370](https://github.com/adap/flower/pull/4370), " +"[#4367](https://github.com/adap/flower/pull/4367), " +"[#4350](https://github.com/adap/flower/pull/4350), " +"[#4364](https://github.com/adap/flower/pull/4364), " +"[#4400](https://github.com/adap/flower/pull/4400), " +"[#4363](https://github.com/adap/flower/pull/4363), " +"[#4401](https://github.com/adap/flower/pull/4401), " +"[#4388](https://github.com/adap/flower/pull/4388), " +"[#4402](https://github.com/adap/flower/pull/4402))" msgstr "" #: ../../source/ref-changelog.md:60 msgid "" -"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " -"`ClientApp`." +"The `flwr-serverapp` command has been introduced as a CLI entry point " +"that runs a `ServerApp` process. This process communicates with SuperLink" +" to load and execute the `ServerApp` object, enabling isolated execution " +"and more flexible deployment." msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:62 msgid "" -"`--isolation=process`: The SuperNode expects an externally-managed " -"process to run the `ClientApp`. This external process is not managed by " -"the SuperNode, so it has to be started beforehand and terminated " -"manually. The common way to use this isolation mode is via the new " -"`flwr/clientapp` Docker image." 
+"**Improve simulation engine and introduce `flwr-simulation` command** " +"([#4433](https://github.com/adap/flower/pull/4433), " +"[#4486](https://github.com/adap/flower/pull/4486), " +"[#4448](https://github.com/adap/flower/pull/4448), " +"[#4427](https://github.com/adap/flower/pull/4427), " +"[#4438](https://github.com/adap/flower/pull/4438), " +"[#4421](https://github.com/adap/flower/pull/4421), " +"[#4430](https://github.com/adap/flower/pull/4430), " +"[#4462](https://github.com/adap/flower/pull/4462))" msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:64 msgid "" -"**Improve Docker support for enterprise deployments** " -"([#4050](https://github.com/adap/flower/pull/4050), " -"[#4090](https://github.com/adap/flower/pull/4090), " -"[#3784](https://github.com/adap/flower/pull/3784), " -"[#3998](https://github.com/adap/flower/pull/3998), " -"[#4094](https://github.com/adap/flower/pull/4094), " -"[#3722](https://github.com/adap/flower/pull/3722))" +"The simulation engine has been significantly improved, resulting in " +"dramatically faster simulations. Additionally, the `flwr-simulation` " +"command has been introduced to enhance maintainability and provide a " +"dedicated entry point for running simulations." msgstr "" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:66 msgid "" -"Flower 1.11 ships many Docker improvements that are especially useful for" -" enterprise deployments:" -msgstr "" - -#: ../../source/ref-changelog.md:67 -msgid "`flwr/supernode` comes with a new Alpine Docker image." +"**Improve SuperLink message management** " +"([#4378](https://github.com/adap/flower/pull/4378), " +"[#4369](https://github.com/adap/flower/pull/4369))" msgstr "" #: ../../source/ref-changelog.md:68 msgid "" -"`flwr/clientapp` is a new image to be used with the `--isolation=process`" -" option. In this mode, SuperNode and `ClientApp` run in two different " -"Docker containers. 
`flwr/supernode` (preferably the Alpine version) runs " -"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " -"runs the `ClientApp`. This is the recommended way to deploy Flower in " -"enterprise settings." -msgstr "" - -#: ../../source/ref-changelog.md:69 -msgid "" -"New all-in-one Docker Compose enables you to easily start a full Flower " -"Deployment Engine on a single machine." +"SuperLink now validates the destination node ID of instruction messages " +"and checks the TTL (time-to-live) for reply messages. When pulling reply " +"messages, an error reply will be generated and returned if the " +"corresponding instruction message does not exist, has expired, or if the " +"reply message exists but has expired." msgstr "" #: ../../source/ref-changelog.md:70 msgid "" -"Completely new Docker documentation: " -"https://flower.ai/docs/framework/docker/index.html" +"**Introduce FedDebug baseline** " +"([#3783](https://github.com/adap/flower/pull/3783))" msgstr "" #: ../../source/ref-changelog.md:72 msgid "" -"**Improve SuperNode authentication** " -"([#4043](https://github.com/adap/flower/pull/4043), " -"[#4047](https://github.com/adap/flower/pull/4047), " -"[#4074](https://github.com/adap/flower/pull/4074))" +"FedDebug is a framework that enhances debugging in Federated Learning by " +"enabling interactive inspection of the training process and automatically" +" identifying clients responsible for degrading the global model's " +"performance—all without requiring testing data or labels. Learn more in " +"the [FedDebug baseline " +"documentation](https://flower.ai/docs/baselines/feddebug.html)." msgstr "" #: ../../source/ref-changelog.md:74 msgid "" -"SuperNode auth has been improved in several ways, including improved " -"logging, improved testing, and improved error handling." 
+"**Update documentation** " +"([#4511](https://github.com/adap/flower/pull/4511), " +"[#4010](https://github.com/adap/flower/pull/4010), " +"[#4396](https://github.com/adap/flower/pull/4396), " +"[#4499](https://github.com/adap/flower/pull/4499), " +"[#4269](https://github.com/adap/flower/pull/4269), " +"[#3340](https://github.com/adap/flower/pull/3340), " +"[#4482](https://github.com/adap/flower/pull/4482), " +"[#4387](https://github.com/adap/flower/pull/4387), " +"[#4342](https://github.com/adap/flower/pull/4342), " +"[#4492](https://github.com/adap/flower/pull/4492), " +"[#4474](https://github.com/adap/flower/pull/4474), " +"[#4500](https://github.com/adap/flower/pull/4500), " +"[#4514](https://github.com/adap/flower/pull/4514), " +"[#4236](https://github.com/adap/flower/pull/4236), " +"[#4112](https://github.com/adap/flower/pull/4112), " +"[#3367](https://github.com/adap/flower/pull/3367), " +"[#4501](https://github.com/adap/flower/pull/4501), " +"[#4373](https://github.com/adap/flower/pull/4373), " +"[#4409](https://github.com/adap/flower/pull/4409), " +"[#4356](https://github.com/adap/flower/pull/4356), " +"[#4520](https://github.com/adap/flower/pull/4520), " +"[#4524](https://github.com/adap/flower/pull/4524), " +"[#4525](https://github.com/adap/flower/pull/4525), " +"[#4526](https://github.com/adap/flower/pull/4526), " +"[#4527](https://github.com/adap/flower/pull/4527), " +"[#4528](https://github.com/adap/flower/pull/4528), " +"[#4545](https://github.com/adap/flower/pull/4545), " +"[#4522](https://github.com/adap/flower/pull/4522), " +"[#4534](https://github.com/adap/flower/pull/4534), " +"[#4513](https://github.com/adap/flower/pull/4513), " +"[#4529](https://github.com/adap/flower/pull/4529), " +"[#4441](https://github.com/adap/flower/pull/4441), " +"[#4530](https://github.com/adap/flower/pull/4530), " +"[#4470](https://github.com/adap/flower/pull/4470), " +"[#4553](https://github.com/adap/flower/pull/4553), " 
+"[#4531](https://github.com/adap/flower/pull/4531), " +"[#4554](https://github.com/adap/flower/pull/4554), " +"[#4555](https://github.com/adap/flower/pull/4555), " +"[#4552](https://github.com/adap/flower/pull/4552), " +"[#4533](https://github.com/adap/flower/pull/4533))" msgstr "" #: ../../source/ref-changelog.md:76 msgid "" -"**Update** `flwr new` **templates** " -"([#3933](https://github.com/adap/flower/pull/3933), " -"[#3894](https://github.com/adap/flower/pull/3894), " -"[#3930](https://github.com/adap/flower/pull/3930), " -"[#3931](https://github.com/adap/flower/pull/3931), " -"[#3997](https://github.com/adap/flower/pull/3997), " -"[#3979](https://github.com/adap/flower/pull/3979), " -"[#3965](https://github.com/adap/flower/pull/3965), " -"[#4013](https://github.com/adap/flower/pull/4013), " -"[#4064](https://github.com/adap/flower/pull/4064))" +"Many documentation pages and tutorials have been updated to improve " +"clarity, fix typos, incorporate user feedback, and stay aligned with the " +"latest features in the framework. Key updates include adding a guide for " +"designing stateful `ClientApp` objects, updating the comprehensive guide " +"for setting up and running Flower's `Simulation Engine`, updating the " +"XGBoost, scikit-learn, and JAX quickstart tutorials to use `flwr run`, " +"updating DP guide, removing outdated pages, updating Docker docs, and " +"marking legacy functions as deprecated. The [Secure Aggregation " +"Protocols](https://flower.ai/docs/framework/contributor-ref-secure-" +"aggregation-protocols.html) page has also been updated." msgstr "" #: ../../source/ref-changelog.md:78 msgid "" -"All `flwr new` templates have been updated to show the latest recommended" -" use of Flower APIs." 
+"**Update examples and templates** " +"([#4510](https://github.com/adap/flower/pull/4510), " +"[#4368](https://github.com/adap/flower/pull/4368), " +"[#4121](https://github.com/adap/flower/pull/4121), " +"[#4329](https://github.com/adap/flower/pull/4329), " +"[#4382](https://github.com/adap/flower/pull/4382), " +"[#4248](https://github.com/adap/flower/pull/4248), " +"[#4395](https://github.com/adap/flower/pull/4395), " +"[#4386](https://github.com/adap/flower/pull/4386), " +"[#4408](https://github.com/adap/flower/pull/4408))" msgstr "" #: ../../source/ref-changelog.md:80 msgid "" -"**Improve Simulation Engine** " -"([#4095](https://github.com/adap/flower/pull/4095), " -"[#3913](https://github.com/adap/flower/pull/3913), " -"[#4059](https://github.com/adap/flower/pull/4059), " -"[#3954](https://github.com/adap/flower/pull/3954), " -"[#4071](https://github.com/adap/flower/pull/4071), " -"[#3985](https://github.com/adap/flower/pull/3985), " -"[#3988](https://github.com/adap/flower/pull/3988))" +"Multiple examples and templates have been updated to enhance usability " +"and correctness. The updates include the `30-minute-tutorial`, " +"`quickstart-jax`, `quickstart-pytorch`, `advanced-tensorflow` examples, " +"and the FlowerTune template." msgstr "" #: ../../source/ref-changelog.md:82 msgid "" -"The Flower Simulation Engine comes with several updates, including " -"improved run config support, verbose logging, simulation backend " -"configuration via `flwr run`, and more." 
+"**Improve Docker support** " +"([#4506](https://github.com/adap/flower/pull/4506), " +"[#4424](https://github.com/adap/flower/pull/4424), " +"[#4224](https://github.com/adap/flower/pull/4224), " +"[#4413](https://github.com/adap/flower/pull/4413), " +"[#4414](https://github.com/adap/flower/pull/4414), " +"[#4336](https://github.com/adap/flower/pull/4336), " +"[#4420](https://github.com/adap/flower/pull/4420), " +"[#4407](https://github.com/adap/flower/pull/4407), " +"[#4422](https://github.com/adap/flower/pull/4422), " +"[#4532](https://github.com/adap/flower/pull/4532), " +"[#4540](https://github.com/adap/flower/pull/4540))" msgstr "" #: ../../source/ref-changelog.md:84 msgid "" -"**Improve** `RecordSet` " -"([#4052](https://github.com/adap/flower/pull/4052), " -"[#3218](https://github.com/adap/flower/pull/3218), " -"[#4016](https://github.com/adap/flower/pull/4016))" +"Docker images and configurations have been updated, including updating " +"Docker Compose files to version 1.13.0, refactoring the Docker build " +"matrix for better maintainability, updating `docker/build-push-action` to" +" 6.9.0, and improving Docker documentation." msgstr "" #: ../../source/ref-changelog.md:86 msgid "" -"`RecordSet` is the core object to exchange model parameters, " -"configuration values and metrics between `ClientApp` and `ServerApp`. " -"This release ships several smaller improvements to `RecordSet` and " -"related `*Record` types." 
+"**Allow app installation without internet access** " +"([#4479](https://github.com/adap/flower/pull/4479), " +"[#4475](https://github.com/adap/flower/pull/4475))" msgstr "" #: ../../source/ref-changelog.md:88 msgid "" -"**Update documentation** " -"([#3972](https://github.com/adap/flower/pull/3972), " -"[#3925](https://github.com/adap/flower/pull/3925), " -"[#4061](https://github.com/adap/flower/pull/4061), " -"[#3984](https://github.com/adap/flower/pull/3984), " -"[#3917](https://github.com/adap/flower/pull/3917), " -"[#3900](https://github.com/adap/flower/pull/3900), " -"[#4066](https://github.com/adap/flower/pull/4066), " -"[#3765](https://github.com/adap/flower/pull/3765), " -"[#4021](https://github.com/adap/flower/pull/4021), " -"[#3906](https://github.com/adap/flower/pull/3906), " -"[#4063](https://github.com/adap/flower/pull/4063), " -"[#4076](https://github.com/adap/flower/pull/4076), " -"[#3920](https://github.com/adap/flower/pull/3920), " -"[#3916](https://github.com/adap/flower/pull/3916))" +"The `flwr build` command now includes a wheel file in the FAB, enabling " +"Flower app installation in environments without internet access via `flwr" +" install`." msgstr "" #: ../../source/ref-changelog.md:90 msgid "" -"Many parts of the documentation, including the main tutorial, have been " -"migrated to show new Flower APIs and other new Flower features like the " -"improved Docker support." 
+"**Improve `flwr log` command** " +"([#4391](https://github.com/adap/flower/pull/4391), " +"[#4411](https://github.com/adap/flower/pull/4411), " +"[#4390](https://github.com/adap/flower/pull/4390), " +"[#4397](https://github.com/adap/flower/pull/4397))" msgstr "" #: ../../source/ref-changelog.md:92 msgid "" -"**Migrate code example to use new Flower APIs** " -"([#3758](https://github.com/adap/flower/pull/3758), " -"[#3701](https://github.com/adap/flower/pull/3701), " -"[#3919](https://github.com/adap/flower/pull/3919), " -"[#3918](https://github.com/adap/flower/pull/3918), " -"[#3934](https://github.com/adap/flower/pull/3934), " -"[#3893](https://github.com/adap/flower/pull/3893), " -"[#3833](https://github.com/adap/flower/pull/3833), " -"[#3922](https://github.com/adap/flower/pull/3922), " -"[#3846](https://github.com/adap/flower/pull/3846), " -"[#3777](https://github.com/adap/flower/pull/3777), " -"[#3874](https://github.com/adap/flower/pull/3874), " -"[#3873](https://github.com/adap/flower/pull/3873), " -"[#3935](https://github.com/adap/flower/pull/3935), " -"[#3754](https://github.com/adap/flower/pull/3754), " -"[#3980](https://github.com/adap/flower/pull/3980), " -"[#4089](https://github.com/adap/flower/pull/4089), " -"[#4046](https://github.com/adap/flower/pull/4046), " -"[#3314](https://github.com/adap/flower/pull/3314), " -"[#3316](https://github.com/adap/flower/pull/3316), " -"[#3295](https://github.com/adap/flower/pull/3295), " -"[#3313](https://github.com/adap/flower/pull/3313))" +"**Refactor SuperNode for better maintainability and efficiency** " +"([#4439](https://github.com/adap/flower/pull/4439), " +"[#4348](https://github.com/adap/flower/pull/4348), " +"[#4512](https://github.com/adap/flower/pull/4512), " +"[#4485](https://github.com/adap/flower/pull/4485))" msgstr "" #: ../../source/ref-changelog.md:94 -msgid "Many code examples have been migrated to use new Flower APIs." 
+msgid "" +"**Support NumPy `2.0`** " +"([#4440](https://github.com/adap/flower/pull/4440))" msgstr "" #: ../../source/ref-changelog.md:96 msgid "" -"**Update Flower framework, framework internals and quality " -"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " -"[#4053](https://github.com/adap/flower/pull/4053), " -"[#4098](https://github.com/adap/flower/pull/4098), " -"[#4067](https://github.com/adap/flower/pull/4067), " -"[#4105](https://github.com/adap/flower/pull/4105), " -"[#4048](https://github.com/adap/flower/pull/4048), " -"[#4107](https://github.com/adap/flower/pull/4107), " -"[#4069](https://github.com/adap/flower/pull/4069), " -"[#3915](https://github.com/adap/flower/pull/3915), " -"[#4101](https://github.com/adap/flower/pull/4101), " -"[#4108](https://github.com/adap/flower/pull/4108), " -"[#3914](https://github.com/adap/flower/pull/3914), " -"[#4068](https://github.com/adap/flower/pull/4068), " -"[#4041](https://github.com/adap/flower/pull/4041), " -"[#4040](https://github.com/adap/flower/pull/4040), " -"[#3986](https://github.com/adap/flower/pull/3986), " -"[#4026](https://github.com/adap/flower/pull/4026), " -"[#3961](https://github.com/adap/flower/pull/3961), " -"[#3975](https://github.com/adap/flower/pull/3975), " -"[#3983](https://github.com/adap/flower/pull/3983), " -"[#4091](https://github.com/adap/flower/pull/4091), " -"[#3982](https://github.com/adap/flower/pull/3982), " -"[#4079](https://github.com/adap/flower/pull/4079), " -"[#4073](https://github.com/adap/flower/pull/4073), " -"[#4060](https://github.com/adap/flower/pull/4060), " -"[#4106](https://github.com/adap/flower/pull/4106), " -"[#4080](https://github.com/adap/flower/pull/4080), " -"[#3974](https://github.com/adap/flower/pull/3974), " -"[#3996](https://github.com/adap/flower/pull/3996), " -"[#3991](https://github.com/adap/flower/pull/3991), " -"[#3981](https://github.com/adap/flower/pull/3981), " -"[#4093](https://github.com/adap/flower/pull/4093), " 
-"[#4100](https://github.com/adap/flower/pull/4100), " -"[#3939](https://github.com/adap/flower/pull/3939), " -"[#3955](https://github.com/adap/flower/pull/3955), " -"[#3940](https://github.com/adap/flower/pull/3940), " -"[#4038](https://github.com/adap/flower/pull/4038))" +"**Update infrastructure and CI/CD** " +"([#4466](https://github.com/adap/flower/pull/4466), " +"[#4419](https://github.com/adap/flower/pull/4419), " +"[#4338](https://github.com/adap/flower/pull/4338), " +"[#4334](https://github.com/adap/flower/pull/4334), " +"[#4456](https://github.com/adap/flower/pull/4456), " +"[#4446](https://github.com/adap/flower/pull/4446), " +"[#4415](https://github.com/adap/flower/pull/4415))" msgstr "" -#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:98 msgid "" -"As always, many parts of the Flower framework and quality infrastructure " -"were improved and updated." +"**Bugfixes** ([#4404](https://github.com/adap/flower/pull/4404), " +"[#4518](https://github.com/adap/flower/pull/4518), " +"[#4452](https://github.com/adap/flower/pull/4452), " +"[#4376](https://github.com/adap/flower/pull/4376), " +"[#4493](https://github.com/adap/flower/pull/4493), " +"[#4436](https://github.com/adap/flower/pull/4436), " +"[#4410](https://github.com/adap/flower/pull/4410), " +"[#4442](https://github.com/adap/flower/pull/4442), " +"[#4375](https://github.com/adap/flower/pull/4375), " +"[#4515](https://github.com/adap/flower/pull/4515))" msgstr "" -#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 -#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +#: ../../source/ref-changelog.md:100 +msgid "" +"**General improvements** " +"([#4454](https://github.com/adap/flower/pull/4454), " +"[#4365](https://github.com/adap/flower/pull/4365), " +"[#4423](https://github.com/adap/flower/pull/4423), " +"[#4516](https://github.com/adap/flower/pull/4516), " 
+"[#4509](https://github.com/adap/flower/pull/4509), " +"[#4498](https://github.com/adap/flower/pull/4498), " +"[#4371](https://github.com/adap/flower/pull/4371), " +"[#4449](https://github.com/adap/flower/pull/4449), " +"[#4488](https://github.com/adap/flower/pull/4488), " +"[#4478](https://github.com/adap/flower/pull/4478), " +"[#4392](https://github.com/adap/flower/pull/4392), " +"[#4483](https://github.com/adap/flower/pull/4483), " +"[#4517](https://github.com/adap/flower/pull/4517), " +"[#4330](https://github.com/adap/flower/pull/4330), " +"[#4458](https://github.com/adap/flower/pull/4458), " +"[#4347](https://github.com/adap/flower/pull/4347), " +"[#4429](https://github.com/adap/flower/pull/4429), " +"[#4463](https://github.com/adap/flower/pull/4463), " +"[#4496](https://github.com/adap/flower/pull/4496), " +"[#4508](https://github.com/adap/flower/pull/4508), " +"[#4444](https://github.com/adap/flower/pull/4444), " +"[#4417](https://github.com/adap/flower/pull/4417), " +"[#4504](https://github.com/adap/flower/pull/4504), " +"[#4418](https://github.com/adap/flower/pull/4418), " +"[#4480](https://github.com/adap/flower/pull/4480), " +"[#4455](https://github.com/adap/flower/pull/4455), " +"[#4468](https://github.com/adap/flower/pull/4468), " +"[#4385](https://github.com/adap/flower/pull/4385), " +"[#4487](https://github.com/adap/flower/pull/4487), " +"[#4393](https://github.com/adap/flower/pull/4393), " +"[#4489](https://github.com/adap/flower/pull/4489), " +"[#4389](https://github.com/adap/flower/pull/4389), " +"[#4507](https://github.com/adap/flower/pull/4507), " +"[#4469](https://github.com/adap/flower/pull/4469), " +"[#4340](https://github.com/adap/flower/pull/4340), " +"[#4353](https://github.com/adap/flower/pull/4353), " +"[#4494](https://github.com/adap/flower/pull/4494), " +"[#4461](https://github.com/adap/flower/pull/4461), " +"[#4362](https://github.com/adap/flower/pull/4362), " +"[#4473](https://github.com/adap/flower/pull/4473), " 
+"[#4405](https://github.com/adap/flower/pull/4405), " +"[#4416](https://github.com/adap/flower/pull/4416), " +"[#4453](https://github.com/adap/flower/pull/4453), " +"[#4491](https://github.com/adap/flower/pull/4491), " +"[#4539](https://github.com/adap/flower/pull/4539), " +"[#4542](https://github.com/adap/flower/pull/4542), " +"[#4538](https://github.com/adap/flower/pull/4538), " +"[#4543](https://github.com/adap/flower/pull/4543), " +"[#4541](https://github.com/adap/flower/pull/4541), " +"[#4550](https://github.com/adap/flower/pull/4550), " +"[#4481](https://github.com/adap/flower/pull/4481))" +msgstr "" + +#: ../../source/ref-changelog.md:104 ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:420 ../../source/ref-changelog.md:512 +#: ../../source/ref-changelog.md:1495 msgid "Deprecations" msgstr "" -#: ../../source/ref-changelog.md:102 -msgid "" -"**Deprecate accessing `Context` via `Client.context`** " -"([#3797](https://github.com/adap/flower/pull/3797))" +#: ../../source/ref-changelog.md:106 +msgid "**Deprecate Python 3.9**" msgstr "" -#: ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:108 msgid "" -"Now that both `client_fn` and `server_fn` receive a `Context` object, " -"accessing `Context` via `Client.context` is deprecated. `Client.context` " -"will be removed in a future release. If you need to access `Context` in " -"your `Client` implementation, pass it manually when creating the `Client`" -" instance in `client_fn`:" +"Flower is deprecating support for Python 3.9 as several of its " +"dependencies are phasing out compatibility with this version. While no " +"immediate changes have been made, users are encouraged to plan for " +"upgrading to a supported Python version." 
msgstr "" -#: ../../source/ref-changelog.md:113 -msgid "" -"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" -" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " -"[#4077](https://github.com/adap/flower/pull/4077), " -"[#3850](https://github.com/adap/flower/pull/3850))" +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:200 +#: ../../source/ref-changelog.md:234 ../../source/ref-changelog.md:314 +#: ../../source/ref-changelog.md:430 ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:600 ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:787 ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:941 ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:1068 ../../source/ref-changelog.md:1130 +#: ../../source/ref-changelog.md:1149 ../../source/ref-changelog.md:1305 +#: ../../source/ref-changelog.md:1376 ../../source/ref-changelog.md:1413 +#: ../../source/ref-changelog.md:1456 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:112 msgid "" -"The CLI commands `flower-supernode` and `flower-server-app` now accept an" -" app directory as argument (instead of references to a `ClientApp` or " -"`ServerApp`). An app directory is any directory containing a " -"`pyproject.toml` file (with the appropriate Flower config fields set). " -"The easiest way to generate a compatible project structure is to use " -"`flwr new`." +"**Remove `flower-superexec` command** " +"([#4351](https://github.com/adap/flower/pull/4351))" msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:114 msgid "" -"**Disable** `flower-client-app` **CLI command** " -"([#4022](https://github.com/adap/flower/pull/4022))" +"The `flower-superexec` command, previously used to launch SuperExec, is " +"no longer functional as SuperExec has been merged into SuperLink. 
" +"Starting an additional SuperExec is no longer necessary when SuperLink is" +" initiated." msgstr "" -#: ../../source/ref-changelog.md:119 -msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." +#: ../../source/ref-changelog.md:116 +msgid "" +"**Remove `flower-server-app` command** " +"([#4490](https://github.com/adap/flower/pull/4490))" msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:118 msgid "" -"**Use spaces instead of commas for separating config args** " -"([#4000](https://github.com/adap/flower/pull/4000))" +"The `flower-server-app` command has been removed. To start a Flower app, " +"please use the `flwr run` command instead." msgstr "" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:120 msgid "" -"When passing configs (run config, node config) to Flower, you now need to" -" separate key-value pairs using spaces instead of commas. For example:" +"**Remove `app` argument from `flower-supernode` command** " +"([#4497](https://github.com/adap/flower/pull/4497))" msgstr "" -#: ../../source/ref-changelog.md:129 -msgid "Previously, you could pass configs using commas, like this:" +#: ../../source/ref-changelog.md:122 +msgid "" +"The usage of `flower-supernode ` has been removed. SuperNode " +"will now load the FAB delivered by SuperLink, and it is no longer " +"possible to directly specify an app directory." msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:124 msgid "" -"**Remove** `flwr example` **CLI command** " -"([#4084](https://github.com/adap/flower/pull/4084))" +"**Remove support for non-app simulations** " +"([#4431](https://github.com/adap/flower/pull/4431))" msgstr "" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:126 msgid "" -"The experimental `flwr example` CLI command has been removed. Use `flwr " -"new` to generate a project and then run it using `flwr run`." 
+"The simulation engine (via `flower-simulation`) now exclusively supports " +"passing an app." msgstr "" -#: ../../source/ref-changelog.md:139 -msgid "v1.10.0 (2024-07-24)" +#: ../../source/ref-changelog.md:128 +msgid "" +"**Rename CLI arguments for `flower-superlink` command** " +"([#4412](https://github.com/adap/flower/pull/4412))" msgstr "" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:130 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " -"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " -"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " +"The `--driver-api-address` argument has been renamed to `--serverappio-" +"api-address` in the `flower-superlink` command to reflect the renaming of" +" the `Driver` service to the `ServerAppIo` service." msgstr "" -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:132 msgid "" -"**Introduce** `flwr run` **(beta)** " -"([#3810](https://github.com/adap/flower/pull/3810), " -"[#3826](https://github.com/adap/flower/pull/3826), " -"[#3880](https://github.com/adap/flower/pull/3880), " -"[#3807](https://github.com/adap/flower/pull/3807), " -"[#3800](https://github.com/adap/flower/pull/3800), " -"[#3814](https://github.com/adap/flower/pull/3814), " -"[#3811](https://github.com/adap/flower/pull/3811), " -"[#3809](https://github.com/adap/flower/pull/3809), " -"[#3819](https://github.com/adap/flower/pull/3819))" +"**Rename CLI arguments for `flwr-serverapp` and `flwr-clientapp` " +"commands** ([#4495](https://github.com/adap/flower/pull/4495))" msgstr "" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:134 msgid "" -"Flower 1.10 ships the first beta release of the new `flwr run` command. 
" -"`flwr run` can run different projects using `flwr run path/to/project`, " -"it enables you to easily switch between different federations using `flwr" -" run . federation` and it runs your Flower project using either local " -"simulation or the new (experimental) SuperExec service. This allows " -"Flower to scale federatated learning from fast local simulation to large-" -"scale production deployment, seamlessly. All projects generated with " -"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " -"`flwr new` to generate a project and then run it using `flwr run`." +"The CLI arguments have been renamed for clarity and consistency. " +"Specifically, `--superlink` for `flwr-serverapp` is now `--serverappio-" +"api-address`, and `--supernode` for `flwr-clientapp` is now " +"`--clientappio-api-address`." msgstr "" -#: ../../source/ref-changelog.md:153 -msgid "" -"**Introduce run config** " -"([#3751](https://github.com/adap/flower/pull/3751), " -"[#3750](https://github.com/adap/flower/pull/3750), " -"[#3845](https://github.com/adap/flower/pull/3845), " -"[#3824](https://github.com/adap/flower/pull/3824), " -"[#3746](https://github.com/adap/flower/pull/3746), " -"[#3728](https://github.com/adap/flower/pull/3728), " -"[#3730](https://github.com/adap/flower/pull/3730), " -"[#3725](https://github.com/adap/flower/pull/3725), " -"[#3729](https://github.com/adap/flower/pull/3729), " -"[#3580](https://github.com/adap/flower/pull/3580), " -"[#3578](https://github.com/adap/flower/pull/3578), " -"[#3576](https://github.com/adap/flower/pull/3576), " -"[#3798](https://github.com/adap/flower/pull/3798), " -"[#3732](https://github.com/adap/flower/pull/3732), " -"[#3815](https://github.com/adap/flower/pull/3815))" +#: ../../source/ref-changelog.md:136 +msgid "v1.12.0 (2024-10-14)" msgstr "" -#: ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:142 msgid "" -"The new run config feature allows you to run your Flower project in " -"different 
configurations without having to change a single line of code. " -"You can now build a configurable `ServerApp` and `ClientApp` that read " -"configuration values at runtime. This enables you to specify config " -"values like `learning-rate=0.01` in `pyproject.toml` (under the " -"`[tool.flwr.app.config]` key). These config values can then be easily " -"overridden via `flwr run --run-config learning-rate=0.02`, and read from " -"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " -"new project using `flwr new` to see run config in action." +"`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel" +" J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, " +"`Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan " +"Gao`, `xiliguguagua` " msgstr "" -#: ../../source/ref-changelog.md:157 +#: ../../source/ref-changelog.md:146 msgid "" -"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " -"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " -"[#3697](https://github.com/adap/flower/pull/3697), " -"[#3694](https://github.com/adap/flower/pull/3694), " -"[#3696](https://github.com/adap/flower/pull/3696))" +"**Introduce SuperExec log streaming** " +"([#3577](https://github.com/adap/flower/pull/3577), " +"[#3584](https://github.com/adap/flower/pull/3584), " +"[#4242](https://github.com/adap/flower/pull/4242), " +"[#3611](https://github.com/adap/flower/pull/3611), " +"[#3613](https://github.com/adap/flower/pull/3613))" msgstr "" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:148 msgid "" -"The `client_fn` signature has been generalized to `client_fn(context: " -"Context) -> Client`. It now receives a `Context` object instead of the " -"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " -"`node_config` and `run_config`, among other things. 
This enables you to " -"build a configurable `ClientApp` that leverages the new run config " -"system." +"Flower now supports log streaming from a remote SuperExec using the `flwr" +" log` command. This new feature allows you to monitor logs from SuperExec" +" in real time via `flwr log ` (or `flwr log " +"`)." msgstr "" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:150 msgid "" -"The previous signature `client_fn(cid: str)` is now deprecated and " -"support for it will be removed in a future release. Use " -"`client_fn(context: Context) -> Client` everywhere." +"**Improve `flwr new` templates** " +"([#4291](https://github.com/adap/flower/pull/4291), " +"[#4292](https://github.com/adap/flower/pull/4292), " +"[#4293](https://github.com/adap/flower/pull/4293), " +"[#4294](https://github.com/adap/flower/pull/4294), " +"[#4295](https://github.com/adap/flower/pull/4295))" msgstr "" -#: ../../source/ref-changelog.md:163 +#: ../../source/ref-changelog.md:152 msgid "" -"**Introduce new** `server_fn(context)` " -"([#3773](https://github.com/adap/flower/pull/3773), " -"[#3796](https://github.com/adap/flower/pull/3796), " -"[#3771](https://github.com/adap/flower/pull/3771))" +"The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and " +"PyTorch have been updated to improve usability and consistency across " +"frameworks." msgstr "" -#: ../../source/ref-changelog.md:165 +#: ../../source/ref-changelog.md:154 msgid "" -"In addition to the new `client_fn(context:Context)`, a new " -"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" -" `ServerApp` (instead of passing, for example, `Strategy`, directly). " -"This enables you to leverage the full `Context` on the server-side to " -"build a configurable `ServerApp`." 
+"**Migrate ID handling to use unsigned 64-bit integers** " +"([#4170](https://github.com/adap/flower/pull/4170), " +"[#4237](https://github.com/adap/flower/pull/4237), " +"[#4243](https://github.com/adap/flower/pull/4243))" msgstr "" -#: ../../source/ref-changelog.md:167 +#: ../../source/ref-changelog.md:156 msgid "" -"**Relaunch all** `flwr new` **templates** " -"([#3877](https://github.com/adap/flower/pull/3877), " -"[#3821](https://github.com/adap/flower/pull/3821), " -"[#3587](https://github.com/adap/flower/pull/3587), " -"[#3795](https://github.com/adap/flower/pull/3795), " -"[#3875](https://github.com/adap/flower/pull/3875), " -"[#3859](https://github.com/adap/flower/pull/3859), " -"[#3760](https://github.com/adap/flower/pull/3760))" +"Node IDs, run IDs, and related fields have been migrated from signed " +"64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To " +"support this change, the `uint64` type is fully supported in all " +"communications. You may now use `uint64` values in config and metric " +"dictionaries. For Python users, that means using `int` values larger than" +" the maximum value of `sint64` but less than the maximum value of " +"`uint64`." msgstr "" -#: ../../source/ref-changelog.md:169 +#: ../../source/ref-changelog.md:158 msgid "" -"All `flwr new` templates have been significantly updated to showcase new " -"Flower features and best practices. This includes using `flwr run` and " -"the new run config feature. You can now easily create a new project using" -" `flwr new` and, after following the instructions to install it, `flwr " -"run` it." 
+"**Add Flower architecture explanation** " +"([#3270](https://github.com/adap/flower/pull/3270))" msgstr "" -#: ../../source/ref-changelog.md:171 +#: ../../source/ref-changelog.md:160 msgid "" -"**Introduce** `flower-supernode` **(preview)** " -"([#3353](https://github.com/adap/flower/pull/3353))" +"A new [Flower architecture explainer](https://flower.ai/docs/framework" +"/explanation-flower-architecture.html) page introduces Flower components " +"step-by-step. Check out the `EXPLANATIONS` section of the Flower " +"documentation if you're interested." msgstr "" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:162 msgid "" -"The new `flower-supernode` CLI is here to replace `flower-client-app`. " -"`flower-supernode` brings full multi-app support to the Flower client-" -"side. It also allows to pass `--node-config` to the SuperNode, which is " -"accessible in your `ClientApp` via `Context` (using the new " -"`client_fn(context: Context)` signature)." +"**Introduce FedRep baseline** " +"([#3790](https://github.com/adap/flower/pull/3790))" msgstr "" -#: ../../source/ref-changelog.md:175 +#: ../../source/ref-changelog.md:164 msgid "" -"**Introduce node config** " -"([#3782](https://github.com/adap/flower/pull/3782), " -"[#3780](https://github.com/adap/flower/pull/3780), " -"[#3695](https://github.com/adap/flower/pull/3695), " -"[#3886](https://github.com/adap/flower/pull/3886))" +"FedRep is a federated learning algorithm that learns shared data " +"representations across clients while allowing each to maintain " +"personalized local models, balancing collaboration and individual " +"adaptation. Read all the details in the paper: \"Exploiting Shared " +"Representations for Personalized Federated Learning\" " +"([arxiv](https://arxiv.org/abs/2102.07078))" msgstr "" -#: ../../source/ref-changelog.md:177 +#: ../../source/ref-changelog.md:166 msgid "" -"A new node config feature allows you to pass a static configuration to " -"the SuperNode. 
This configuration is read-only and available to every " -"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " -"config via `Context` (`context.node_config`)." +"**Improve FlowerTune template and LLM evaluation pipelines** " +"([#4286](https://github.com/adap/flower/pull/4286), " +"[#3769](https://github.com/adap/flower/pull/3769), " +"[#4272](https://github.com/adap/flower/pull/4272), " +"[#4257](https://github.com/adap/flower/pull/4257), " +"[#4220](https://github.com/adap/flower/pull/4220), " +"[#4282](https://github.com/adap/flower/pull/4282), " +"[#4171](https://github.com/adap/flower/pull/4171), " +"[#4228](https://github.com/adap/flower/pull/4228), " +"[#4258](https://github.com/adap/flower/pull/4258), " +"[#4296](https://github.com/adap/flower/pull/4296), " +"[#4287](https://github.com/adap/flower/pull/4287), " +"[#4217](https://github.com/adap/flower/pull/4217), " +"[#4249](https://github.com/adap/flower/pull/4249), " +"[#4324](https://github.com/adap/flower/pull/4324), " +"[#4219](https://github.com/adap/flower/pull/4219), " +"[#4327](https://github.com/adap/flower/pull/4327))" msgstr "" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:168 msgid "" -"**Introduce SuperExec (experimental)** " -"([#3605](https://github.com/adap/flower/pull/3605), " -"[#3723](https://github.com/adap/flower/pull/3723), " -"[#3731](https://github.com/adap/flower/pull/3731), " -"[#3589](https://github.com/adap/flower/pull/3589), " -"[#3604](https://github.com/adap/flower/pull/3604), " -"[#3622](https://github.com/adap/flower/pull/3622), " -"[#3838](https://github.com/adap/flower/pull/3838), " -"[#3720](https://github.com/adap/flower/pull/3720), " -"[#3606](https://github.com/adap/flower/pull/3606), " -"[#3602](https://github.com/adap/flower/pull/3602), " -"[#3603](https://github.com/adap/flower/pull/3603), " -"[#3555](https://github.com/adap/flower/pull/3555), " -"[#3808](https://github.com/adap/flower/pull/3808), " 
-"[#3724](https://github.com/adap/flower/pull/3724), " -"[#3658](https://github.com/adap/flower/pull/3658), " -"[#3629](https://github.com/adap/flower/pull/3629))" +"Refined evaluation pipelines, metrics, and documentation for the upcoming" +" FlowerTune LLM Leaderboard across multiple domains including Finance, " +"Medical, and general NLP. Stay tuned for the official launch—we welcome " +"all federated learning and LLM enthusiasts to participate in this " +"exciting challenge!" msgstr "" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:170 msgid "" -"This is the first experimental release of Flower SuperExec, a new service" -" that executes your runs. It's not ready for production deployment just " -"yet, but don't hesitate to give it a try if you're interested." +"**Enhance Docker Support and Documentation** " +"([#4191](https://github.com/adap/flower/pull/4191), " +"[#4251](https://github.com/adap/flower/pull/4251), " +"[#4190](https://github.com/adap/flower/pull/4190), " +"[#3928](https://github.com/adap/flower/pull/3928), " +"[#4298](https://github.com/adap/flower/pull/4298), " +"[#4192](https://github.com/adap/flower/pull/4192), " +"[#4136](https://github.com/adap/flower/pull/4136), " +"[#4187](https://github.com/adap/flower/pull/4187), " +"[#4261](https://github.com/adap/flower/pull/4261), " +"[#4177](https://github.com/adap/flower/pull/4177), " +"[#4176](https://github.com/adap/flower/pull/4176), " +"[#4189](https://github.com/adap/flower/pull/4189), " +"[#4297](https://github.com/adap/flower/pull/4297), " +"[#4226](https://github.com/adap/flower/pull/4226))" msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:172 msgid "" -"**Add new federated learning with tabular data example** " -"([#3568](https://github.com/adap/flower/pull/3568))" +"Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images," +" and comprehensively updated [Docker " 
+"documentation](https://flower.ai/docs/framework/docker/index.html) " +"including quickstart guides and distributed Docker Compose instructions." msgstr "" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:174 msgid "" -"A new code example exemplifies a federated learning setup using the " -"Flower framework on the Adult Census Income tabular dataset." +"**Introduce Flower glossary** " +"([#4165](https://github.com/adap/flower/pull/4165), " +"[#4235](https://github.com/adap/flower/pull/4235))" msgstr "" -#: ../../source/ref-changelog.md:187 +#: ../../source/ref-changelog.md:176 msgid "" -"**Create generic adapter layer (preview)** " -"([#3538](https://github.com/adap/flower/pull/3538), " -"[#3536](https://github.com/adap/flower/pull/3536), " -"[#3540](https://github.com/adap/flower/pull/3540))" +"Added the [Federated Learning glossary](https://flower.ai/glossary/) to " +"the Flower repository, located under the `flower/glossary/` directory. " +"This resource aims to provide clear definitions and explanations of key " +"FL concepts. Community contributions are highly welcomed to help expand " +"and refine this knowledge base — this is probably the easiest way to " +"become a Flower contributor!" msgstr "" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:178 msgid "" -"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" -" with Flower in a transparent way. This makes Flower more modular and " -"allows for integration into other federated learning solutions and " -"platforms." 
+"**Implement Message Time-to-Live (TTL)** " +"([#3620](https://github.com/adap/flower/pull/3620), " +"[#3596](https://github.com/adap/flower/pull/3596), " +"[#3615](https://github.com/adap/flower/pull/3615), " +"[#3609](https://github.com/adap/flower/pull/3609), " +"[#3635](https://github.com/adap/flower/pull/3635))" msgstr "" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:180 msgid "" -"**Refactor Flower Simulation Engine** " -"([#3581](https://github.com/adap/flower/pull/3581), " -"[#3471](https://github.com/adap/flower/pull/3471), " -"[#3804](https://github.com/adap/flower/pull/3804), " -"[#3468](https://github.com/adap/flower/pull/3468), " -"[#3839](https://github.com/adap/flower/pull/3839), " -"[#3806](https://github.com/adap/flower/pull/3806), " -"[#3861](https://github.com/adap/flower/pull/3861), " -"[#3543](https://github.com/adap/flower/pull/3543), " -"[#3472](https://github.com/adap/flower/pull/3472), " -"[#3829](https://github.com/adap/flower/pull/3829), " -"[#3469](https://github.com/adap/flower/pull/3469))" +"Added comprehensive TTL support for messages in Flower's SuperLink. " +"Messages are now automatically expired and cleaned up based on " +"configurable TTL values, available through the low-level API (and used by" +" default in the high-level API)." msgstr "" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-changelog.md:182 msgid "" -"The Simulation Engine was significantly refactored. This results in " -"faster and more stable simulations. It is also the foundation for " -"upcoming changes that aim to provide the next level of performance and " -"configurability in federated learning simulations." 
+"**Improve FAB handling** " +"([#4303](https://github.com/adap/flower/pull/4303), " +"[#4264](https://github.com/adap/flower/pull/4264), " +"[#4305](https://github.com/adap/flower/pull/4305), " +"[#4304](https://github.com/adap/flower/pull/4304))" msgstr "" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-changelog.md:184 msgid "" -"**Optimize Docker containers** " -"([#3591](https://github.com/adap/flower/pull/3591))" +"An 8-character hash is now appended to the FAB file name. The `flwr " +"install` command installs FABs with a more flattened folder structure, " +"reducing it from 3 levels to 1." msgstr "" -#: ../../source/ref-changelog.md:197 +#: ../../source/ref-changelog.md:186 msgid "" -"Flower Docker containers were optimized and updated to use that latest " -"Flower framework features." +"**Update documentation** " +"([#3341](https://github.com/adap/flower/pull/3341), " +"[#3338](https://github.com/adap/flower/pull/3338), " +"[#3927](https://github.com/adap/flower/pull/3927), " +"[#4152](https://github.com/adap/flower/pull/4152), " +"[#4151](https://github.com/adap/flower/pull/4151), " +"[#3993](https://github.com/adap/flower/pull/3993))" msgstr "" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:188 msgid "" -"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " -"[#3789](https://github.com/adap/flower/pull/3789))" +"Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging " +"Face, Fastai) to use the new `flwr run` command and removed default title" +" from documentation base template. A new blockchain example has been " +"added to FAQ." msgstr "" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:190 msgid "" -"Improved logging aims to be more concise and helpful to show you the " -"details you actually care about." 
+"**Update example projects** " +"([#3716](https://github.com/adap/flower/pull/3716), " +"[#4007](https://github.com/adap/flower/pull/4007), " +"[#4130](https://github.com/adap/flower/pull/4130), " +"[#4234](https://github.com/adap/flower/pull/4234), " +"[#4206](https://github.com/adap/flower/pull/4206), " +"[#4188](https://github.com/adap/flower/pull/4188), " +"[#4247](https://github.com/adap/flower/pull/4247), " +"[#4331](https://github.com/adap/flower/pull/4331))" msgstr "" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:192 msgid "" -"**Refactor framework internals** " -"([#3621](https://github.com/adap/flower/pull/3621), " -"[#3792](https://github.com/adap/flower/pull/3792), " -"[#3772](https://github.com/adap/flower/pull/3772), " -"[#3805](https://github.com/adap/flower/pull/3805), " -"[#3583](https://github.com/adap/flower/pull/3583), " -"[#3825](https://github.com/adap/flower/pull/3825), " -"[#3597](https://github.com/adap/flower/pull/3597), " -"[#3802](https://github.com/adap/flower/pull/3802), " -"[#3569](https://github.com/adap/flower/pull/3569))" +"Refreshed multiple example projects including vertical FL, PyTorch " +"(advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized " +"Hugging Face quickstart with a smaller language model and removed legacy " +"simulation examples." 
msgstr "" -#: ../../source/ref-changelog.md:207 -msgid "Documentation improvements" +#: ../../source/ref-changelog.md:194 +msgid "" +"**Update translations** " +"([#4070](https://github.com/adap/flower/pull/4070), " +"[#4316](https://github.com/adap/flower/pull/4316), " +"[#4252](https://github.com/adap/flower/pull/4252), " +"[#4256](https://github.com/adap/flower/pull/4256), " +"[#4210](https://github.com/adap/flower/pull/4210), " +"[#4263](https://github.com/adap/flower/pull/4263), " +"[#4259](https://github.com/adap/flower/pull/4259))" msgstr "" -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:196 msgid "" -"**Add 🇰🇷 Korean translations** " -"([#3680](https://github.com/adap/flower/pull/3680))" +"**General improvements** " +"([#4239](https://github.com/adap/flower/pull/4239), " +"[4276](https://github.com/adap/flower/pull/4276), " +"[4204](https://github.com/adap/flower/pull/4204), " +"[4184](https://github.com/adap/flower/pull/4184), " +"[4227](https://github.com/adap/flower/pull/4227), " +"[4183](https://github.com/adap/flower/pull/4183), " +"[4202](https://github.com/adap/flower/pull/4202), " +"[4250](https://github.com/adap/flower/pull/4250), " +"[4267](https://github.com/adap/flower/pull/4267), " +"[4246](https://github.com/adap/flower/pull/4246), " +"[4240](https://github.com/adap/flower/pull/4240), " +"[4265](https://github.com/adap/flower/pull/4265), " +"[4238](https://github.com/adap/flower/pull/4238), " +"[4275](https://github.com/adap/flower/pull/4275), " +"[4318](https://github.com/adap/flower/pull/4318), " +"[#4178](https://github.com/adap/flower/pull/4178), " +"[#4315](https://github.com/adap/flower/pull/4315), " +"[#4241](https://github.com/adap/flower/pull/4241), " +"[#4289](https://github.com/adap/flower/pull/4289), " +"[#4290](https://github.com/adap/flower/pull/4290), " +"[#4181](https://github.com/adap/flower/pull/4181), " +"[#4208](https://github.com/adap/flower/pull/4208), " 
+"[#4225](https://github.com/adap/flower/pull/4225), " +"[#4314](https://github.com/adap/flower/pull/4314), " +"[#4174](https://github.com/adap/flower/pull/4174), " +"[#4203](https://github.com/adap/flower/pull/4203), " +"[#4274](https://github.com/adap/flower/pull/4274), " +"[#3154](https://github.com/adap/flower/pull/3154), " +"[#4201](https://github.com/adap/flower/pull/4201), " +"[#4268](https://github.com/adap/flower/pull/4268), " +"[#4254](https://github.com/adap/flower/pull/4254), " +"[#3990](https://github.com/adap/flower/pull/3990), " +"[#4212](https://github.com/adap/flower/pull/4212), " +"[#2938](https://github.com/adap/flower/pull/2938), " +"[#4205](https://github.com/adap/flower/pull/4205), " +"[#4222](https://github.com/adap/flower/pull/4222), " +"[#4313](https://github.com/adap/flower/pull/4313), " +"[#3936](https://github.com/adap/flower/pull/3936), " +"[#4278](https://github.com/adap/flower/pull/4278), " +"[#4319](https://github.com/adap/flower/pull/4319), " +"[#4332](https://github.com/adap/flower/pull/4332), " +"[#4333](https://github.com/adap/flower/pull/4333))" +msgstr "" + +#: ../../source/ref-changelog.md:202 +msgid "" +"**Drop Python 3.8 support and update minimum version to 3.9** " +"([#4180](https://github.com/adap/flower/pull/4180), " +"[#4213](https://github.com/adap/flower/pull/4213), " +"[#4193](https://github.com/adap/flower/pull/4193), " +"[#4199](https://github.com/adap/flower/pull/4199), " +"[#4196](https://github.com/adap/flower/pull/4196), " +"[#4195](https://github.com/adap/flower/pull/4195), " +"[#4198](https://github.com/adap/flower/pull/4198), " +"[#4194](https://github.com/adap/flower/pull/4194))" +msgstr "" + +#: ../../source/ref-changelog.md:204 +msgid "" +"Python 3.8 support was deprecated in Flower 1.9, and this release removes" +" support. Flower now requires Python 3.9 or later (Python 3.11 is " +"recommended). CI and documentation were updated to use Python 3.9 as the " +"minimum supported version. 
Flower now supports Python 3.9 to 3.12." +msgstr "" + +#: ../../source/ref-changelog.md:206 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:212 msgid "" -"**Update translations** " -"([#3586](https://github.com/adap/flower/pull/3586), " -"[#3679](https://github.com/adap/flower/pull/3679), " -"[#3570](https://github.com/adap/flower/pull/3570), " -"[#3681](https://github.com/adap/flower/pull/3681), " -"[#3617](https://github.com/adap/flower/pull/3617), " -"[#3674](https://github.com/adap/flower/pull/3674), " -"[#3671](https://github.com/adap/flower/pull/3671), " -"[#3572](https://github.com/adap/flower/pull/3572), " -"[#3631](https://github.com/adap/flower/pull/3631))" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:213 -msgid "" -"**Update documentation** " -"([#3864](https://github.com/adap/flower/pull/3864), " -"[#3688](https://github.com/adap/flower/pull/3688), " -"[#3562](https://github.com/adap/flower/pull/3562), " -"[#3641](https://github.com/adap/flower/pull/3641), " -"[#3384](https://github.com/adap/flower/pull/3384), " -"[#3634](https://github.com/adap/flower/pull/3634), " -"[#3823](https://github.com/adap/flower/pull/3823), " -"[#3793](https://github.com/adap/flower/pull/3793), " -"[#3707](https://github.com/adap/flower/pull/3707))" +#: ../../source/ref-changelog.md:214 +msgid "Improvements" msgstr "" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:216 msgid "" -"Updated documentation includes new install instructions for different " -"shells, a new Flower Code Examples documentation landing page, new `flwr`" -" CLI docs and an updated federated XGBoost code example." 
+"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: ../../source/ref-changelog.md:219 -msgid "**Deprecate** `client_fn(cid: str)`" +#: ../../source/ref-changelog.md:218 +msgid "" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:220 msgid "" -"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " -"This signature is now deprecated. Use the new signature " -"`client_fn(context: Context) -> Client` instead. The new argument " -"`context` allows accessing `node_id`, `node_config`, `run_config` and " -"other `Context` features. When running using the simulation engine (or " -"using `flower-supernode` with a custom `--node-config partition-id=...`)," -" `context.node_config[\"partition-id\"]` will return an `int` partition " -"ID that can be used with Flower Datasets to load a different partition of" -" the dataset on each simulated or deployed SuperNode." +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: ../../source/ref-changelog.md:223 +#: ../../source/ref-changelog.md:222 msgid "" -"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" -" `ServerApp` **directly**" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: ../../source/ref-changelog.md:225 +#: ../../source/ref-changelog.md:224 msgid "" -"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " -"is now deprecated. Instead of passing " -"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " -"pass them wrapped in a `server_fn(context: Context) -> " -"ServerAppComponents` function, like this: " -"`ServerApp(server_fn=server_fn)`. 
`ServerAppComponents` can hold " -"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " -"to that, `server_fn` allows you to access `Context` (for example, to read" -" the `run_config`)." +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: ../../source/ref-changelog.md:229 +#: ../../source/ref-changelog.md:226 msgid "" -"**Remove support for `client_ids` in `start_simulation`** " -"([#3699](https://github.com/adap/flower/pull/3699))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: ../../source/ref-changelog.md:231 +#: ../../source/ref-changelog.md:228 msgid "" -"The (rarely used) feature that allowed passing custom `client_ids` to the" -" `start_simulation` function was removed. This removal is part of a " -"bigger effort to refactor the simulation engine and unify how the Flower " -"internals work in simulation and deployment." +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-changelog.md:233 +#: ../../source/ref-changelog.md:230 msgid "" -"**Remove `flower-driver-api` and `flower-fleet-api`** " -"([#3418](https://github.com/adap/flower/pull/3418))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: ../../source/ref-changelog.md:235 +#: ../../source/ref-changelog.md:232 msgid "" -"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" -"api` were removed in an effort to streamline the SuperLink developer " -"experience. Use `flower-superlink` instead." 
+"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: ../../source/ref-changelog.md:237 -msgid "v1.9.0 (2024-06-10)" +#: ../../source/ref-changelog.md:238 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-changelog.md:243 +#: ../../source/ref-changelog.md:244 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:247 +#: ../../source/ref-changelog.md:248 msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" -msgstr "" - -#: ../../source/ref-changelog.md:249 -msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. 
With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:251 +#: ../../source/ref-changelog.md:250 msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +"Dynamic code updates are here! 
`flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:253 +#: ../../source/ref-changelog.md:252 msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:255 -msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." +#: ../../source/ref-changelog.md:254 +msgid "`flwr run` is all you need." 
msgstr "" -#: ../../source/ref-changelog.md:257 +#: ../../source/ref-changelog.md:256 msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: ../../source/ref-changelog.md:259 +#: ../../source/ref-changelog.md:258 msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." +"The SuperNode can now run your `ClientApp` in a fully isolated way. 
In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." +msgstr "" + +#: ../../source/ref-changelog.md:260 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: ../../source/ref-changelog.md:261 +#: ../../source/ref-changelog.md:262 msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" #: ../../source/ref-changelog.md:263 msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:265 +#: ../../source/ref-changelog.md:264 msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: ../../source/ref-changelog.md:267 +#: ../../source/ref-changelog.md:266 msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. 
This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-changelog.md:269 +#: ../../source/ref-changelog.md:268 msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" +msgstr "" + +#: ../../source/ref-changelog.md:270 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" #: ../../source/ref-changelog.md:271 msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. 
`flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." +msgstr "" + +#: ../../source/ref-changelog.md:272 +msgid "" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" #: ../../source/ref-changelog.md:273 msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" #: ../../source/ref-changelog.md:275 msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" #: ../../source/ref-changelog.md:277 msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" #: ../../source/ref-changelog.md:279 msgid "" -"Logs received a substantial update. 
Not only are logs now much nicer to " -"look at, but they are also more configurable." +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" #: ../../source/ref-changelog.md:281 msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" #: ../../source/ref-changelog.md:283 msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." 
+"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" #: ../../source/ref-changelog.md:285 msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" #: ../../source/ref-changelog.md:287 msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" #: ../../source/ref-changelog.md:289 msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and 
metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" #: ../../source/ref-changelog.md:291 msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" #: ../../source/ref-changelog.md:293 msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " 
-"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." msgstr "" #: ../../source/ref-changelog.md:295 msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" msgstr "" #: ../../source/ref-changelog.md:297 -msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " 
-"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +msgid "Many code examples have been migrated to use new Flower APIs." msgstr "" #: ../../source/ref-changelog.md:299 msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " 
+"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" msgstr "" -#: ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:305 msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:307 msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" -msgstr "" - -#: ../../source/ref-changelog.md:305 -msgid "As always, Flower code examples have received 
many updates." +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:307 +#: ../../source/ref-changelog.md:316 msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " 
-"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: ../../source/ref-changelog.md:311 -msgid "**Deprecate Python 3.8 support**" +#: ../../source/ref-changelog.md:318 +msgid "" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:313 +#: ../../source/ref-changelog.md:320 msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: ../../source/ref-changelog.md:315 -msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +#: ../../source/ref-changelog.md:322 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
msgstr "" -#: ../../source/ref-changelog.md:317 +#: ../../source/ref-changelog.md:324 msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-changelog.md:319 +#: ../../source/ref-changelog.md:326 msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: ../../source/ref-changelog.md:321 -msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." +#: ../../source/ref-changelog.md:332 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-changelog.md:325 +#: ../../source/ref-changelog.md:338 msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: ../../source/ref-changelog.md:327 +#: ../../source/ref-changelog.md:340 msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. 
Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:329 -msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +#: ../../source/ref-changelog.md:342 +msgid "v1.10.0 (2024-07-24)" msgstr "" -#: ../../source/ref-changelog.md:331 +#: ../../source/ref-changelog.md:348 msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:333 +#: ../../source/ref-changelog.md:352 msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: ../../source/ref-changelog.md:335 +#: ../../source/ref-changelog.md:354 msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI 
options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:337 -msgid "v1.8.0 (2024-04-03)" +#: ../../source/ref-changelog.md:356 +msgid "" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: ../../source/ref-changelog.md:343 +#: ../../source/ref-changelog.md:358 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-changelog.md:347 +#: ../../source/ref-changelog.md:360 msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " 
-"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: ../../source/ref-changelog.md:349 +#: ../../source/ref-changelog.md:362 msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." 
msgstr "" -#: ../../source/ref-changelog.md:351 +#: ../../source/ref-changelog.md:364 msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: ../../source/ref-changelog.md:353 +#: ../../source/ref-changelog.md:366 msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." 
+"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-changelog.md:355 +#: ../../source/ref-changelog.md:368 msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:357 +#: ../../source/ref-changelog.md:370 msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." 
+"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: ../../source/ref-changelog.md:359 +#: ../../source/ref-changelog.md:372 msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -#: ../../source/ref-changelog.md:361 +#: ../../source/ref-changelog.md:374 msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
+"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: ../../source/ref-changelog.md:363 +#: ../../source/ref-changelog.md:376 msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: ../../source/ref-changelog.md:365 +#: ../../source/ref-changelog.md:378 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: ../../source/ref-changelog.md:367 +#: ../../source/ref-changelog.md:380 msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -#: ../../source/ref-changelog.md:369 +#: ../../source/ref-changelog.md:382 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: ../../source/ref-changelog.md:371 +#: ../../source/ref-changelog.md:384 msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -#: ../../source/ref-changelog.md:373 +#: ../../source/ref-changelog.md:386 msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: ../../source/ref-changelog.md:375 +#: ../../source/ref-changelog.md:388 msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: ../../source/ref-changelog.md:377 +#: ../../source/ref-changelog.md:390 msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: ../../source/ref-changelog.md:379 +#: ../../source/ref-changelog.md:392 msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. 
This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -#: ../../source/ref-changelog.md:381 +#: ../../source/ref-changelog.md:394 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: ../../source/ref-changelog.md:383 +#: ../../source/ref-changelog.md:396 msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." 
msgstr "" -#: ../../source/ref-changelog.md:385 +#: ../../source/ref-changelog.md:398 msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: ../../source/ref-changelog.md:387 +#: ../../source/ref-changelog.md:400 msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -#: ../../source/ref-changelog.md:389 +#: ../../source/ref-changelog.md:402 msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: ../../source/ref-changelog.md:391 +#: ../../source/ref-changelog.md:404 msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -#: ../../source/ref-changelog.md:393 +#: ../../source/ref-changelog.md:406 msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." 
+"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: ../../source/ref-changelog.md:395 -msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " 
-"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " 
-"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +#: ../../source/ref-changelog.md:410 +msgid "Documentation improvements" msgstr "" -#: ../../source/ref-changelog.md:401 -msgid "v1.7.0 (2024-02-05)" +#: ../../source/ref-changelog.md:412 +msgid "" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: ../../source/ref-changelog.md:407 +#: ../../source/ref-changelog.md:414 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: ../../source/ref-changelog.md:411 +#: ../../source/ref-changelog.md:416 msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: ../../source/ref-changelog.md:413 +#: ../../source/ref-changelog.md:418 msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. 
Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -#: ../../source/ref-changelog.md:415 -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +#: ../../source/ref-changelog.md:422 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: ../../source/ref-changelog.md:417 +#: ../../source/ref-changelog.md:424 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. 
When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: ../../source/ref-changelog.md:419 +#: ../../source/ref-changelog.md:426 msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-changelog.md:421 +#: ../../source/ref-changelog.md:428 msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -#: ../../source/ref-changelog.md:423 +#: ../../source/ref-changelog.md:432 msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: ../../source/ref-changelog.md:425 +#: ../../source/ref-changelog.md:434 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-changelog.md:427 +#: ../../source/ref-changelog.md:436 msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: ../../source/ref-changelog.md:429 +#: ../../source/ref-changelog.md:438 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
+msgstr "" + +#: ../../source/ref-changelog.md:440 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: ../../source/ref-changelog.md:431 +#: ../../source/ref-changelog.md:446 msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: ../../source/ref-changelog.md:433 +#: ../../source/ref-changelog.md:450 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-changelog.md:452 msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " 
-"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: ../../source/ref-changelog.md:437 +#: ../../source/ref-changelog.md:454 msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." 
msgstr "" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:456 msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:458 msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." 
msgstr "" -#: ../../source/ref-changelog.md:443 +#: ../../source/ref-changelog.md:460 msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: ../../source/ref-changelog.md:445 +#: ../../source/ref-changelog.md:462 msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:464 msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." 
+"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: ../../source/ref-changelog.md:449 +#: ../../source/ref-changelog.md:466 msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -#: ../../source/ref-changelog.md:451 -msgid "Many Flower code examples received substantial updates." +#: ../../source/ref-changelog.md:468 +msgid "" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 -msgid "**Update Flower Baselines**" +#: ../../source/ref-changelog.md:470 +msgid "" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: ../../source/ref-changelog.md:455 +#: ../../source/ref-changelog.md:472 msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: ../../source/ref-changelog.md:456 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: ../../source/ref-changelog.md:474 +msgid "" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." 
msgstr "" -#: ../../source/ref-changelog.md:457 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: ../../source/ref-changelog.md:476 +msgid "" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: ../../source/ref-changelog.md:458 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: ../../source/ref-changelog.md:478 +msgid "" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:459 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-changelog.md:480 +msgid "" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: ../../source/ref-changelog.md:460 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-changelog.md:482 +msgid "" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
msgstr "" -#: ../../source/ref-changelog.md:462 +#: ../../source/ref-changelog.md:484 msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: ../../source/ref-changelog.md:464 +#: ../../source/ref-changelog.md:486 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " 
-"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" -msgstr "" - -#: ../../source/ref-changelog.md:466 -msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." -msgstr "" - -#: ../../source/ref-changelog.md:468 -msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " 
-"[#2789](https://github.com/adap/flower/pull/2789))" -msgstr "" - -#: ../../source/ref-changelog.md:470 -msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " 
-"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" -msgstr "" - -#: ../../source/ref-changelog.md:474 -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" -msgstr "" - -#: ../../source/ref-changelog.md:476 -msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." -msgstr "" - -#: ../../source/ref-changelog.md:478 -msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" -#: ../../source/ref-changelog.md:480 +#: ../../source/ref-changelog.md:488 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -#: ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:490 msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. 
A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -#: ../../source/ref-changelog.md:484 +#: ../../source/ref-changelog.md:492 msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -#: 
../../source/ref-changelog.md:486 +#: ../../source/ref-changelog.md:494 msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -#: ../../source/ref-changelog.md:488 +#: ../../source/ref-changelog.md:496 msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: ../../source/ref-changelog.md:490 +#: ../../source/ref-changelog.md:498 msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +"As always, the Flower documentation has received many updates. 
Notable " +"new pages include:" msgstr "" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-changelog.md:500 msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." -msgstr "" - -#: ../../source/ref-changelog.md:494 -msgid "v1.6.0 (2023-11-28)" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: ../../source/ref-changelog.md:500 +#: ../../source/ref-changelog.md:502 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" #: ../../source/ref-changelog.md:504 msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" #: ../../source/ref-changelog.md:506 msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " 
+"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" #: ../../source/ref-changelog.md:508 -msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +msgid "As always, Flower code examples have received many updates." msgstr "" #: ../../source/ref-changelog.md:510 msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "" - -#: ../../source/ref-changelog.md:512 -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" #: ../../source/ref-changelog.md:514 -msgid "" -"**Support custom** `ClientManager` **in** 
`start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +msgid "**Deprecate Python 3.8 support**" msgstr "" #: ../../source/ref-changelog.md:516 msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" #: ../../source/ref-changelog.md:518 msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" #: ../../source/ref-changelog.md:520 -msgid "Add gRPC request-response capability to the Android SDK." +msgid "" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" #: ../../source/ref-changelog.md:522 msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" #: ../../source/ref-changelog.md:524 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "" - -#: ../../source/ref-changelog.md:526 msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. 
Support for " +"`--server` will be removed in a future release." msgstr "" #: ../../source/ref-changelog.md:528 msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" #: ../../source/ref-changelog.md:530 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" #: ../../source/ref-changelog.md:532 msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" #: ../../source/ref-changelog.md:534 msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. 
via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" #: ../../source/ref-changelog.md:536 msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" #: ../../source/ref-changelog.md:538 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" #: ../../source/ref-changelog.md:540 -msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" -msgstr "" - -#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 -msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-changelog.md:546 msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" #: ../../source/ref-changelog.md:550 msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" #: ../../source/ref-changelog.md:552 msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " 
-"[#2507](https://github.com/adap/flower/pull/2507))" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" #: ../../source/ref-changelog.md:554 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" #: ../../source/ref-changelog.md:556 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgid "" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." 
+" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" #: ../../source/ref-changelog.md:558 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" #: ../../source/ref-changelog.md:560 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgid "" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." 
msgstr "" #: ../../source/ref-changelog.md:562 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgid "" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" #: ../../source/ref-changelog.md:564 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
msgstr "" #: ../../source/ref-changelog.md:566 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "" - -#: ../../source/ref-changelog.md:568 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "" - -#: ../../source/ref-changelog.md:570 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: ../../source/ref-changelog.md:572 +#: ../../source/ref-changelog.md:568 msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. 
A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -#: ../../source/ref-changelog.md:574 +#: ../../source/ref-changelog.md:570 msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: ../../source/ref-changelog.md:576 +#: ../../source/ref-changelog.md:572 msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. 
In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:574 msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:576 msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " 
-"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" -msgstr "" - -#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 -#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 -#: ../../source/ref-changelog.md:857 -msgid "Flower received many improvements under the hood, too many to list here." +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." 
msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:578 msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:580 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:582 msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: ../../source/ref-changelog.md:592 +#: ../../source/ref-changelog.md:584 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. 
Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: ../../source/ref-changelog.md:594 -msgid "v1.5.0 (2023-08-31)" +#: ../../source/ref-changelog.md:586 +msgid "" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:588 msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." 
msgstr "" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:590 msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:592 msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
msgstr "" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:594 msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -#: ../../source/ref-changelog.md:610 +#: ../../source/ref-changelog.md:596 msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " 
-"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." msgstr "" -#: ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:598 msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." 
+"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " 
+"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " 
+"[#2954](https://github.com/adap/flower/pull/2954))" +msgstr "" + +#: ../../source/ref-changelog.md:604 +msgid "v1.7.0 (2024-02-05)" +msgstr "" + +#: ../../source/ref-changelog.md:610 +msgid "" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" #: ../../source/ref-changelog.md:614 msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" #: ../../source/ref-changelog.md:616 msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." 
msgstr "" #: ../../source/ref-changelog.md:618 msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" #: ../../source/ref-changelog.md:620 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" #: ../../source/ref-changelog.md:622 msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" #: 
../../source/ref-changelog.md:624 msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" #: ../../source/ref-changelog.md:626 -msgid "**Deprecate Python 3.7**" +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" #: ../../source/ref-changelog.md:628 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" #: ../../source/ref-changelog.md:630 msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" #: ../../source/ref-changelog.md:632 msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
msgstr "" #: ../../source/ref-changelog.md:634 msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" #: ../../source/ref-changelog.md:636 msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" #: ../../source/ref-changelog.md:638 msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" #: ../../source/ref-changelog.md:640 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). 
An official Flower client Docker image will follow." msgstr "" #: ../../source/ref-changelog.md:642 msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" #: ../../source/ref-changelog.md:644 msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" #: ../../source/ref-changelog.md:646 msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" #: ../../source/ref-changelog.md:648 msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" #: ../../source/ref-changelog.md:650 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
msgstr "" #: ../../source/ref-changelog.md:652 msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" #: ../../source/ref-changelog.md:654 -msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +msgid "Many Flower code examples received substantial updates." msgstr "" -#: ../../source/ref-changelog.md:656 -msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." 
+#: ../../source/ref-changelog.md:656 ../../source/ref-changelog.md:749 +msgid "**Update Flower Baselines**" msgstr "" #: ../../source/ref-changelog.md:658 msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: ../../source/ref-changelog.md:660 -msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +#: ../../source/ref-changelog.md:659 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "" + +#: ../../source/ref-changelog.md:660 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "" + +#: ../../source/ref-changelog.md:661 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" #: ../../source/ref-changelog.md:662 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "" + +#: ../../source/ref-changelog.md:663 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "" + +#: ../../source/ref-changelog.md:665 msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/ref-changelog.md:664 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
+#: ../../source/ref-changelog.md:667 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/ref-changelog.md:666 +#: ../../source/ref-changelog.md:669 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " 
-"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/ref-changelog.md:668 +#: ../../source/ref-changelog.md:671 msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." 
+"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/ref-changelog.md:670 +#: ../../source/ref-changelog.md:673 msgid "" "**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " 
+"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/ref-changelog.md:678 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-changelog.md:677 +msgid "" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/ref-changelog.md:684 +#: ../../source/ref-changelog.md:679 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma 
(Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:681 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:683 msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." 
+"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:685 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:687 msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:689 msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:691 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. 
Forward it to anyone who's " -"interested in Federated Learning!" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:693 msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:695 msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: ../../source/ref-changelog.md:704 -msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +#: ../../source/ref-changelog.md:697 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:703 msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:707 msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:709 msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:711 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." 
msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:713 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:715 msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." 
msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:717 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:719 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:721 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-changelog.md:724 -msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +#: ../../source/ref-changelog.md:723 +msgid "Add gRPC request-response capability to the Android SDK." 
msgstr "" -#: ../../source/ref-changelog.md:726 +#: ../../source/ref-changelog.md:725 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/ref-changelog.md:728 -msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +#: ../../source/ref-changelog.md:727 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:729 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:731 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." 
msgstr "" -#: ../../source/ref-changelog.md:734 +#: ../../source/ref-changelog.md:733 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " 
-"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: ../../source/ref-changelog.md:742 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:735 +msgid "" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:737 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:739 msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:741 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:743 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:745 ../../source/ref-changelog.md:747 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:751 msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), 
" +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-changelog.md:762 -msgid "Both IPv4 and IPv6 addresses are supported." +#: ../../source/ref-changelog.md:753 +msgid "" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:755 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:757 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/ref-changelog.md:768 -msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +#: ../../source/ref-changelog.md:759 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/ref-changelog.md:770 -msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
+#: ../../source/ref-changelog.md:761 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/ref-changelog.md:772 -msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +#: ../../source/ref-changelog.md:763 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/ref-changelog.md:774 -msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +#: ../../source/ref-changelog.md:765 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/ref-changelog.md:776 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +#: ../../source/ref-changelog.md:767 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:769 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "" + +#: ../../source/ref-changelog.md:771 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "" + +#: ../../source/ref-changelog.md:773 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." 
+"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:775 msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:777 msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:779 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"**General updates to the simulation engine** " 
+"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:781 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:783 msgid "" "**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " 
-"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " 
+"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:785 ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:993 +#: ../../source/ref-changelog.md:1060 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: ../../source/ref-changelog.md:792 +#: ../../source/ref-changelog.md:789 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 +#: ../../source/ref-changelog.md:791 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" 
+"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: ../../source/ref-changelog.md:800 -msgid "v1.2.0 (2023-01-13)" +#: ../../source/ref-changelog.md:793 +msgid "" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/ref-changelog.md:806 +#: ../../source/ref-changelog.md:795 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:797 +msgid "v1.5.0 (2023-08-31)" +msgstr "" + +#: ../../source/ref-changelog.md:803 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/ref-changelog.md:812 +#: ../../source/ref-changelog.md:807 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:809 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:811 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:813 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:815 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. 
We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:817 msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/ref-changelog.md:823 +#: ../../source/ref-changelog.md:819 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:821 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" + +#: ../../source/ref-changelog.md:823 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
msgstr "" #: ../../source/ref-changelog.md:825 msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" #: ../../source/ref-changelog.md:827 msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" #: ../../source/ref-changelog.md:829 -msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." 
+msgid "**Deprecate Python 3.7**" msgstr "" #: ../../source/ref-changelog.md:831 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" #: ../../source/ref-changelog.md:833 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" #: ../../source/ref-changelog.md:835 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" #: ../../source/ref-changelog.md:837 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. 
This is " -"especially useful for users who want to deploy Flower in production." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" #: ../../source/ref-changelog.md:839 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" #: ../../source/ref-changelog.md:841 msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" #: ../../source/ref-changelog.md:843 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" #: ../../source/ref-changelog.md:845 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." 
+"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" #: ../../source/ref-changelog.md:847 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" #: ../../source/ref-changelog.md:849 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" #: ../../source/ref-changelog.md:851 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" #: ../../source/ref-changelog.md:853 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." 
msgstr "" #: ../../source/ref-changelog.md:855 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:857 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:859 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to 
start!" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:869 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/ref-changelog.md:861 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:863 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:865 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/ref-changelog.md:879 -msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +#: ../../source/ref-changelog.md:867 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
msgstr "" -#: ../../source/ref-changelog.md:881 +#: ../../source/ref-changelog.md:869 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:871 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:873 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." 
+"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:887 -msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +#: ../../source/ref-changelog.md:881 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:887 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" #: ../../source/ref-changelog.md:891 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" #: ../../source/ref-changelog.md:893 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" #: ../../source/ref-changelog.md:895 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" #: ../../source/ref-changelog.md:897 msgid "" -"Some internals of the Virtual Client Engine have been revamped. 
The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" #: ../../source/ref-changelog.md:899 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" #: ../../source/ref-changelog.md:901 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" 
msgstr "" #: ../../source/ref-changelog.md:903 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" #: ../../source/ref-changelog.md:905 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" #: ../../source/ref-changelog.md:907 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" #: ../../source/ref-changelog.md:909 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." 
msgstr "" #: ../../source/ref-changelog.md:911 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" -msgstr "" - -#: ../../source/ref-changelog.md:913 -msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" +msgstr "" + +#: ../../source/ref-changelog.md:913 +msgid "" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." 
msgstr "" #: ../../source/ref-changelog.md:915 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" #: ../../source/ref-changelog.md:917 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" #: ../../source/ref-changelog.md:919 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" #: ../../source/ref-changelog.md:921 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." 
+"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" #: ../../source/ref-changelog.md:923 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" #: ../../source/ref-changelog.md:925 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." -msgstr "" - -#: ../../source/ref-changelog.md:931 -msgid "v1.0.0 (2022-07-28)" -msgstr "" - -#: ../../source/ref-changelog.md:933 -msgid "Highlights" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/ref-changelog.md:935 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:927 +msgid "" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" -#: ../../source/ref-changelog.md:936 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/ref-changelog.md:929 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/ref-changelog.md:937 -msgid "Configurable `get_parameters`" +#: ../../source/ref-changelog.md:931 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/ref-changelog.md:938 +#: ../../source/ref-changelog.md:933 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/ref-changelog.md:942 +#: ../../source/ref-changelog.md:935 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -#: ../../source/ref-changelog.md:944 +#: ../../source/ref-changelog.md:937 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " 
+"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/ref-changelog.md:948 -msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +#: ../../source/ref-changelog.md:945 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/ref-changelog.md:950 +#: ../../source/ref-changelog.md:951 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:952 +#: ../../source/ref-changelog.md:955 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/ref-changelog.md:954 +#: ../../source/ref-changelog.md:957 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." 
msgstr "" -#: ../../source/ref-changelog.md:956 +#: ../../source/ref-changelog.md:959 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:961 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/ref-changelog.md:960 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: ../../source/ref-changelog.md:963 +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/ref-changelog.md:961 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: ../../source/ref-changelog.md:965 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: ../../source/ref-changelog.md:962 -msgid "`eval_fn` --> `evaluate_fn`" +#: ../../source/ref-changelog.md:967 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:969 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." 
msgstr "" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:971 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/ref-changelog.md:968 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-changelog.md:973 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:975 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:977 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." 
msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:979 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/ref-changelog.md:976 +#: ../../source/ref-changelog.md:981 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:983 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:985 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." 
+"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:987 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/ref-changelog.md:984 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +#: ../../source/ref-changelog.md:989 +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:991 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " 
+"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:995 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/ref-changelog.md:990 +#: ../../source/ref-changelog.md:997 ../../source/ref-changelog.md:1064 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"As usual, the documentation has improved quite a bit. 
It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "" + +#: ../../source/ref-changelog.md:1003 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/ref-changelog.md:992 +#: ../../source/ref-changelog.md:1009 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:1013 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/ref-changelog.md:996 +#: ../../source/ref-changelog.md:1015 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/ref-changelog.md:998 +#: ../../source/ref-changelog.md:1017 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/ref-changelog.md:1000 +#: ../../source/ref-changelog.md:1019 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -#: ../../source/ref-changelog.md:1002 +#: ../../source/ref-changelog.md:1021 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/ref-changelog.md:1004 +#: ../../source/ref-changelog.md:1023 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" msgstr "" -#: ../../source/ref-changelog.md:1008 +#: ../../source/ref-changelog.md:1025 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1010 +#: ../../source/ref-changelog.md:1026 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1012 +#: ../../source/ref-changelog.md:1027 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1014 +#: ../../source/ref-changelog.md:1028 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" 
+"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:1016 +#: ../../source/ref-changelog.md:1030 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/ref-changelog.md:1018 +#: ../../source/ref-changelog.md:1032 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: ../../source/ref-changelog.md:1020 +#: ../../source/ref-changelog.md:1034 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." 
msgstr "" -#: ../../source/ref-changelog.md:1022 +#: ../../source/ref-changelog.md:1036 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/ref-changelog.md:1024 -msgid "`scikit-learn`" +#: ../../source/ref-changelog.md:1038 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/ref-changelog.md:1025 -msgid "`simulation_pytorch`" +#: ../../source/ref-changelog.md:1040 +msgid "" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -#: ../../source/ref-changelog.md:1026 -msgid "`quickstart_pytorch`" +#: ../../source/ref-changelog.md:1042 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:1027 -msgid "`quickstart_simulation`" +#: ../../source/ref-changelog.md:1044 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/ref-changelog.md:1028 -msgid "`quickstart_tensorflow`" +#: ../../source/ref-changelog.md:1046 +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/ref-changelog.md:1029 -msgid "`advanced_tensorflow`" +#: ../../source/ref-changelog.md:1048 +msgid "" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: ../../source/ref-changelog.md:1031 +#: ../../source/ref-changelog.md:1050 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:1052 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:1054 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -msgstr "" - -#: ../../source/ref-changelog.md:1037 -msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" -msgstr "" - -#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 -#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 -msgid "**Minor updates**" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:1056 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." 
msgstr "" -#: ../../source/ref-changelog.md:1042 +#: ../../source/ref-changelog.md:1058 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/ref-changelog.md:1043 +#: ../../source/ref-changelog.md:1062 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/ref-changelog.md:1044 +#: ../../source/ref-changelog.md:1066 msgid "" -"Rename ProtoBuf messages to improve consistency " 
-"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-changelog.md:1046 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:1072 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/ref-changelog.md:1050 +#: ../../source/ref-changelog.md:1076 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:1052 +#: ../../source/ref-changelog.md:1078 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/ref-changelog.md:1054 +#: ../../source/ref-changelog.md:1082 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:1084 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: ../../source/ref-changelog.md:1058 +#: ../../source/ref-changelog.md:1086 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:1088 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. 
The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: ../../source/ref-changelog.md:1062 +#: ../../source/ref-changelog.md:1090 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/ref-changelog.md:1064 +#: ../../source/ref-changelog.md:1092 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:1094 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:1096 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." 
msgstr "" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1098 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:1100 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:1102 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/ref-changelog.md:1076 +#: ../../source/ref-changelog.md:1104 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." 
msgstr "" -#: ../../source/ref-changelog.md:1078 +#: ../../source/ref-changelog.md:1106 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1108 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -#: ../../source/ref-changelog.md:1082 +#: ../../source/ref-changelog.md:1110 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/ref-changelog.md:1084 +#: ../../source/ref-changelog.md:1112 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
msgstr "" -#: ../../source/ref-changelog.md:1086 +#: ../../source/ref-changelog.md:1114 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/ref-changelog.md:1088 +#: ../../source/ref-changelog.md:1116 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -#: ../../source/ref-changelog.md:1090 +#: ../../source/ref-changelog.md:1118 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/ref-changelog.md:1092 +#: ../../source/ref-changelog.md:1120 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
+"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/ref-changelog.md:1096 +#: ../../source/ref-changelog.md:1122 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/ref-changelog.md:1097 +#: ../../source/ref-changelog.md:1124 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: ../../source/ref-changelog.md:1098 +#: ../../source/ref-changelog.md:1126 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-changelog.md:1099 +#: ../../source/ref-changelog.md:1128 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." 
msgstr "" -#: ../../source/ref-changelog.md:1100 -msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +#: ../../source/ref-changelog.md:1134 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/ref-changelog.md:1104 -msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +#: ../../source/ref-changelog.md:1136 +msgid "Highlights" msgstr "" -#: ../../source/ref-changelog.md:1105 -msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +#: ../../source/ref-changelog.md:1138 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/ref-changelog.md:1106 -msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +#: ../../source/ref-changelog.md:1139 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-changelog.md:1107 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:1140 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/ref-changelog.md:1108 +#: ../../source/ref-changelog.md:1141 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/ref-changelog.md:1109 +#: ../../source/ref-changelog.md:1145 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " 
+"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/ref-changelog.md:1110 +#: ../../source/ref-changelog.md:1147 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " 
+"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: ../../source/ref-changelog.md:1111 +#: ../../source/ref-changelog.md:1151 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/ref-changelog.md:1113 -msgid "v0.18.0 (2022-02-28)" +#: ../../source/ref-changelog.md:1153 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -#: ../../source/ref-changelog.md:1117 +#: ../../source/ref-changelog.md:1155 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/ref-changelog.md:1119 +#: ../../source/ref-changelog.md:1157 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." 
+"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:1159 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:1123 +#: ../../source/ref-changelog.md:1161 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/ref-changelog.md:1125 -msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +#: ../../source/ref-changelog.md:1163 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:1127 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +#: ../../source/ref-changelog.md:1164 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/ref-changelog.md:1129 -msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +#: ../../source/ref-changelog.md:1165 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:1167 msgid "" -"Android support has finally arrived in `main`! 
Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:1169 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-changelog.md:1135 -msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +#: ../../source/ref-changelog.md:1171 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-changelog.md:1137 +#: ../../source/ref-changelog.md:1173 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." 
+"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:1175 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/ref-changelog.md:1141 +#: ../../source/ref-changelog.md:1177 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:1143 +#: ../../source/ref-changelog.md:1179 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/ref-changelog.md:1145 +#: ../../source/ref-changelog.md:1181 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." 
+"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:1183 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:1185 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:1187 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "" + +#: ../../source/ref-changelog.md:1189 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/ref-changelog.md:1153 +#: ../../source/ref-changelog.md:1191 msgid "" -"SSL enables secure encrypted connections between clients and servers. 
" -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/ref-changelog.md:1155 +#: ../../source/ref-changelog.md:1193 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:1157 +#: ../../source/ref-changelog.md:1195 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/ref-changelog.md:1159 +#: ../../source/ref-changelog.md:1197 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:1161 +#: ../../source/ref-changelog.md:1199 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." 
msgstr "" -#: ../../source/ref-changelog.md:1165 +#: ../../source/ref-changelog.md:1201 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/ref-changelog.md:1166 +#: ../../source/ref-changelog.md:1203 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/ref-changelog.md:1167 +#: ../../source/ref-changelog.md:1205 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:1168 +#: ../../source/ref-changelog.md:1207 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
msgstr "" -#: ../../source/ref-changelog.md:1169 +#: ../../source/ref-changelog.md:1211 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/ref-changelog.md:1170 +#: ../../source/ref-changelog.md:1213 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/ref-changelog.md:1171 +#: ../../source/ref-changelog.md:1215 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/ref-changelog.md:1175 +#: ../../source/ref-changelog.md:1217 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/ref-changelog.md:1177 +#: ../../source/ref-changelog.md:1219 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." 
+"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:1179 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:1221 +msgid "" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/ref-changelog.md:1183 +#: ../../source/ref-changelog.md:1223 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-changelog.md:1185 +#: ../../source/ref-changelog.md:1225 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/ref-changelog.md:1187 -msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." 
+#: ../../source/ref-changelog.md:1227 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:1189 -msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +#: ../../source/ref-changelog.md:1228 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:1191 -msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +#: ../../source/ref-changelog.md:1229 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:1192 -msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +#: ../../source/ref-changelog.md:1230 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:1194 -msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +#: ../../source/ref-changelog.md:1231 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:1196 -msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +#: ../../source/ref-changelog.md:1232 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:1198 +#: ../../source/ref-changelog.md:1234 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:1200 +#: ../../source/ref-changelog.md:1236 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: 
../../source/ref-changelog.md:1204 +#: ../../source/ref-changelog.md:1238 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:1205 +#: ../../source/ref-changelog.md:1240 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" 
msgstr "" -#: ../../source/ref-changelog.md:1206 -msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +#: ../../source/ref-changelog.md:1242 ../../source/ref-changelog.md:1297 +#: ../../source/ref-changelog.md:1366 ../../source/ref-changelog.md:1405 +msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:1207 +#: ../../source/ref-changelog.md:1244 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:1208 +#: ../../source/ref-changelog.md:1245 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:1212 +#: ../../source/ref-changelog.md:1246 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:1214 +#: ../../source/ref-changelog.md:1247 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." 
+"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:1216 +#: ../../source/ref-changelog.md:1249 +msgid "v0.19.0 (2022-05-18)" +msgstr "" + +#: ../../source/ref-changelog.md:1253 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:1218 +#: ../../source/ref-changelog.md:1255 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." 
msgstr "" -#: ../../source/ref-changelog.md:1220 +#: ../../source/ref-changelog.md:1257 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:1222 +#: ../../source/ref-changelog.md:1259 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:1224 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:1261 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:1228 +#: ../../source/ref-changelog.md:1263 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." 
msgstr "" -#: ../../source/ref-changelog.md:1230 -msgid "(abstract) FedOpt" +#: ../../source/ref-changelog.md:1265 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:1233 +#: ../../source/ref-changelog.md:1267 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:1235 +#: ../../source/ref-changelog.md:1269 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:1237 +#: ../../source/ref-changelog.md:1271 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. 
If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/ref-changelog.md:1239 +#: ../../source/ref-changelog.md:1273 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:1241 +#: ../../source/ref-changelog.md:1275 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: ../../source/ref-changelog.md:1243 +#: ../../source/ref-changelog.md:1277 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:1245 +#: ../../source/ref-changelog.md:1279 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." 
+"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:1247 +#: ../../source/ref-changelog.md:1281 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:1249 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:1283 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1285 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:1255 +#: ../../source/ref-changelog.md:1287 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:1257 +#: ../../source/ref-changelog.md:1289 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:1259 +#: ../../source/ref-changelog.md:1291 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/ref-changelog.md:1261 +#: ../../source/ref-changelog.md:1293 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:1263 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:1295 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." 
msgstr "" -#: ../../source/ref-changelog.md:1267 +#: ../../source/ref-changelog.md:1299 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-changelog.md:1269 +#: ../../source/ref-changelog.md:1300 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-changelog.md:1271 +#: ../../source/ref-changelog.md:1301 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-changelog.md:1290 +#: ../../source/ref-changelog.md:1302 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." 
+"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-changelog.md:1294 +#: ../../source/ref-changelog.md:1303 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-changelog.md:1296 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:1307 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-changelog.md:1300 +#: ../../source/ref-changelog.md:1308 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-changelog.md:1302 +#: ../../source/ref-changelog.md:1309 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-changelog.md:1304 +#: ../../source/ref-changelog.md:1310 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." 
+"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:1306 +#: ../../source/ref-changelog.md:1311 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-changelog.md:1308 +#: ../../source/ref-changelog.md:1312 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:1323 +#: ../../source/ref-changelog.md:1313 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:1325 +#: ../../source/ref-changelog.md:1314 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." 
+"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:1327 -msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +#: ../../source/ref-changelog.md:1316 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-changelog.md:1329 +#: ../../source/ref-changelog.md:1320 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-changelog.md:1346 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/ref-changelog.md:1322 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -#: ../../source/ref-changelog.md:1350 +#: ../../source/ref-changelog.md:1324 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-changelog.md:1351 -msgid "Improved documentation" +#: ../../source/ref-changelog.md:1326 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." 
msgstr "" -#: ../../source/ref-changelog.md:1352 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:1328 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-changelog.md:1353 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:1330 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: ../../source/ref-changelog.md:1354 +#: ../../source/ref-changelog.md:1332 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/ref-changelog.md:1355 +#: ../../source/ref-changelog.md:1334 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: ../../source/ref-changelog.md:1357 -msgid "Bugfix:" +#: ../../source/ref-changelog.md:1336 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." 
msgstr "" -#: ../../source/ref-changelog.md:1359 +#: ../../source/ref-changelog.md:1338 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-changelog.md:1361 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:1340 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 -msgid "Important changes:" +#: ../../source/ref-changelog.md:1342 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-changelog.md:1365 +#: ../../source/ref-changelog.md:1344 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." 
msgstr "" -#: ../../source/ref-changelog.md:1366 +#: ../../source/ref-changelog.md:1346 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-changelog.md:1367 +#: ../../source/ref-changelog.md:1348 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-changelog.md:1369 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:1350 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-changelog.md:1371 -msgid "Incompatible changes:" +#: ../../source/ref-changelog.md:1352 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -#: ../../source/ref-changelog.md:1373 +#: ../../source/ref-changelog.md:1354 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-changelog.md:1374 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:1356 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-changelog.md:1375 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:1358 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-changelog.md:1376 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:1360 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -#: ../../source/ref-changelog.md:1377 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:1362 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-changelog.md:1381 +#: ../../source/ref-changelog.md:1364 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). 
Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-changelog.md:1382 +#: ../../source/ref-changelog.md:1368 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-changelog.md:1383 +#: ../../source/ref-changelog.md:1369 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1370 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-changelog.md:1385 +#: ../../source/ref-changelog.md:1371 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/ref-changelog.md:1372 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:1373 msgid "" -"Flower comes with a number of usage examples. 
The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-example-projects.rst:9 -msgid "The following examples are available as standalone projects." +#: ../../source/ref-changelog.md:1374 +msgid "" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/ref-example-projects.rst:12 -msgid "Quickstart TensorFlow/Keras" +#: ../../source/ref-changelog.md:1378 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:1380 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." 
msgstr "" -#: ../../source/ref-example-projects.rst:17 -msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +#: ../../source/ref-changelog.md:1382 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-example-projects.rst:19 -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:1386 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-example-projects.rst:20 +#: ../../source/ref-changelog.md:1388 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-example-projects.rst:24 -#: ../../source/tutorial-quickstart-pytorch.rst:4 -msgid "Quickstart PyTorch" +#: ../../source/ref-changelog.md:1390 +msgid "" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
msgstr "" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-changelog.md:1392 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:1394 msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-example-projects.rst:31 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +#: ../../source/ref-changelog.md:1395 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-example-projects.rst:34 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/ref-changelog.md:1397 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-changelog.md:1399 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:1401 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-example-projects.rst:40 +#: ../../source/ref-changelog.md:1403 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: 
../../source/ref-example-projects.rst:44 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +#: ../../source/ref-changelog.md:1407 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:1408 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-example-projects.rst:49 +#: ../../source/ref-changelog.md:1409 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-example-projects.rst:51 +#: ../../source/ref-changelog.md:1410 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:1411 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:1415 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-faq.rst:9 +#: ../../source/ref-changelog.md:1417 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. 
Here's a quickstart example:" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:1419 msgid "" -"`Flower simulation PyTorch " -"`_" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-faq.rst:12 +#: ../../source/ref-changelog.md:1421 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:1423 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-faq.rst:16 +#: ../../source/ref-changelog.md:1425 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" 
+#: ../../source/ref-changelog.md:1427 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-faq.rst:20 +#: ../../source/ref-changelog.md:1431 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "" -"`Android Kotlin example `_" +#: ../../source/ref-changelog.md:1433 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-faq.rst:23 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:1436 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:1438 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-faq.rst:27 +#: ../../source/ref-changelog.md:1440 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-faq.rst:30 -msgid "`FLock: A Decentralised AI Training Platform `_." 
+#: ../../source/ref-changelog.md:1442 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-faq.rst:30 -msgid "Contribute to on-chain training the model and earn rewards." +#: ../../source/ref-changelog.md:1444 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-faq.rst:31 -msgid "Local blockchain with federated learning simulation." +#: ../../source/ref-changelog.md:1446 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:1448 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-changelog.md:1450 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-faq.rst:34 -msgid "" -"`Flower meets KOSMoS `_." 
+#: ../../source/ref-changelog.md:1452 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-changelog.md:1454 msgid "" -"`Flower meets Talan blog post `_ ." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-faq.rst:36 +#: ../../source/ref-changelog.md:1458 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/ref-changelog.md:1460 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:1462 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." 
msgstr "" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:1464 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/ref-changelog.md:1466 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:1470 +msgid "" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:1472 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:1474 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." 
msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:1493 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:1497 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +#: ../../source/ref-changelog.md:1499 +msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:1503 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:1505 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." 
-msgstr "" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:1507 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:1509 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:1511 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:1526 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:1528 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:1530 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" 
msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:1532 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/ref-telemetry.md:44 -msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +#: ../../source/ref-changelog.md:1549 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:1553 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-telemetry.md:48 -msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." 
+#: ../../source/ref-changelog.md:1554 +msgid "Improved documentation" msgstr "" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/ref-changelog.md:1555 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:1556 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "" + +#: ../../source/ref-changelog.md:1557 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:1558 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +#: ../../source/ref-changelog.md:1560 +msgid "Bugfix:" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:1562 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." 
+"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." +#: ../../source/ref-changelog.md:1564 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:4 -msgid "Quickstart Android" +#: ../../source/ref-changelog.md:1566 ../../source/ref-changelog.md:1582 +msgid "Important changes:" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:9 +#: ../../source/ref-changelog.md:1568 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:11 +#: ../../source/ref-changelog.md:1569 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:4 -msgid "Quickstart fastai" +#: ../../source/ref-changelog.md:1570 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:6 -msgid "" -"In this federated learning tutorial we will learn how to train a " -"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `." 
+#: ../../source/ref-changelog.md:1572 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 -msgid "Then, clone the code example directly from GitHub:" +#: ../../source/ref-changelog.md:1574 +msgid "Incompatible changes:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:18 +#: ../../source/ref-changelog.md:1576 msgid "" -"This will create a new directory called `quickstart-fastai` containing " -"the following files:" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:31 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 -msgid "Next, activate your environment, then run:" +#: ../../source/ref-changelog.md:1577 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:41 -msgid "" -"This example by default runs the Flower Simulation Engine, creating a " -"federation of 10 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." 
-" Let's run the project:" +#: ../../source/ref-changelog.md:1578 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:54 -#: ../../source/tutorial-quickstart-huggingface.rst:61 -#: ../../source/tutorial-quickstart-mlx.rst:60 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 -#: ../../source/tutorial-quickstart-pytorch.rst:62 -#: ../../source/tutorial-quickstart-tensorflow.rst:62 -msgid "With default arguments you will see an output like this one:" +#: ../../source/ref-changelog.md:1579 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:98 -#: ../../source/tutorial-quickstart-huggingface.rst:112 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 -#: ../../source/tutorial-quickstart-pytorch.rst:103 -#: ../../source/tutorial-quickstart-tensorflow.rst:103 -msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +#: ../../source/ref-changelog.md:1580 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:108 +#: ../../source/ref-changelog.md:1584 msgid "" -"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " -"in the Flower GitHub repository." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:1585 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:4 -msgid "Quickstart 🤗 Transformers" +#: ../../source/ref-changelog.md:1586 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:6 +#: ../../source/ref-changelog.md:1587 msgid "" -"In this federated learning tutorial we will learn how to train a large " -"language model (LLM) on the `IMDB " -"`_ dataset using Flower" -" and the 🤗 Hugging Face Transformers library. It is recommended to create" -" a virtual environment and run everything within a :doc:`virtualenv " -"`." +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:1588 msgid "" -"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " -"project. It will generate all the files needed to run, by default with " -"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " -"The dataset will be partitioned using |flowerdatasets|_'s " -"|iidpartitioner|_." +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:17 -#: ../../source/tutorial-quickstart-mlx.rst:17 -#: ../../source/tutorial-quickstart-pytorch.rst:18 -#: ../../source/tutorial-quickstart-tensorflow.rst:18 -msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:25 +#: ../../source/ref-example-projects.rst:4 msgid "" -"Then, run the command below. 
You will be prompted to select one of the " -"available templates (choose ``HuggingFace``), give a name to your " -"project, and type in your developer name:" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -#: ../../source/tutorial-quickstart-mlx.rst:32 -#: ../../source/tutorial-quickstart-pytorch.rst:34 -#: ../../source/tutorial-quickstart-tensorflow.rst:34 -msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. It should have the following structure:" +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:47 -#: ../../source/tutorial-quickstart-mlx.rst:46 -#: ../../source/tutorial-quickstart-pytorch.rst:48 -#: ../../source/tutorial-quickstart-tensorflow.rst:48 +#: ../../source/ref-example-projects.rst:12 +msgid "Quickstart TensorFlow/Keras" +msgstr "" + +#: ../../source/ref-example-projects.rst:14 msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:54 -#: ../../source/tutorial-quickstart-pytorch.rst:55 -#: ../../source/tutorial-quickstart-tensorflow.rst:55 -msgid "To run the project, do:" +#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:102 -msgid "You can also run the project with GPU as follows:" +#: ../../source/ref-example-projects.rst:19 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:109 +#: ../../source/ref-example-projects.rst:20 msgid "" -"This will use the default arguments where each ``ClientApp`` will use 2 " -"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:120 -#: ../../source/tutorial-quickstart-mlx.rst:110 -#: ../../source/tutorial-quickstart-pytorch.rst:111 -msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:115 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "The Data" +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:126 +#: ../../source/ref-example-projects.rst:29 msgid "" -"This tutorial uses |flowerdatasets|_ to easily download and partition the" -" `IMDB `_ dataset. In " -"this example you'll make use of the |iidpartitioner|_ to generate " -"``num_partitions`` partitions. You can choose |otherpartitioners|_ " -"available in Flower Datasets. To tokenize the text, we will also load the" -" tokenizer from the pre-trained Transformer model that we'll use during " -"training - more on that in the next section. Each ``ClientApp`` will call" -" this function to create dataloaders with the data that correspond to " -"their data partition." 
+"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:171 -#: ../../source/tutorial-quickstart-mlx.rst:155 -#: ../../source/tutorial-quickstart-pytorch.rst:150 -#: ../../source/tutorial-quickstart-tensorflow.rst:139 -msgid "The Model" +#: ../../source/ref-example-projects.rst:31 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:173 -msgid "" -"We will leverage 🤗 Hugging Face to federate the training of language " -"models over multiple clients using Flower. More specifically, we will " -"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " -"classification over the dataset of IMDB ratings. The end goal is to " -"detect if a movie rating is positive or negative. If you have access to " -"larger GPUs, feel free to use larger models!" +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:185 +#: ../../source/ref-example-projects.rst:36 msgid "" -"Note that here, ``model_name`` is a string that will be loaded from the " -"``Context`` in the ClientApp and ServerApp." +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:188 +#: ../../source/ref-example-projects.rst:38 msgid "" -"In addition to loading the pretrained model weights and architecture, we " -"also include two utility functions to perform both training (i.e. " -"``train()``) and evaluation (i.e. ``test()``) using the above model. " -"These functions should look fairly familiar if you have some prior " -"experience with PyTorch. Note these functions do not have anything " -"specific to Flower. That being said, the training function will normally " -"be called, as we'll see later, from a Flower client passing its own data." 
-" In summary, your clients can use standard training/testing functions to " -"perform local training or evaluation:" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:228 -#: ../../source/tutorial-quickstart-mlx.rst:199 -#: ../../source/tutorial-quickstart-pytorch.rst:224 -#: ../../source/tutorial-quickstart-tensorflow.rst:168 -msgid "The ClientApp" +#: ../../source/ref-example-projects.rst:40 +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:230 -msgid "" -"The main changes we have to make to use 🤗 Hugging Face with Flower will " -"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " -"the hood, the ``transformers`` library uses PyTorch, which means we can " -"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" -" the :doc:`Quickstart PyTorch ` tutorial. As" -" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" -" and represented as a list of NumPy arrays. The ``set_weights()`` " -"function that's the opposite: given a list of NumPy arrays it applies " -"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:241 -#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/ref-example-projects.rst:46 msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." 
+"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:257 -#: ../../source/tutorial-quickstart-pytorch.rst:250 +#: ../../source/ref-example-projects.rst:49 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:283 +#: ../../source/ref-example-projects.rst:51 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. You could define " -"additional hyperparameters in ``pyproject.toml`` and access them here." 
+"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:316 -#: ../../source/tutorial-quickstart-mlx.rst:361 -#: ../../source/tutorial-quickstart-pytorch.rst:307 -#: ../../source/tutorial-quickstart-tensorflow.rst:232 -msgid "The ServerApp" +#: ../../source/ref-faq.rst:2 +msgid "FAQ" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:318 +#: ../../source/ref-faq.rst:4 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"|serverappcomponents|_ as opposed to a |client|_ In this example we use " -"the `FedAvg` strategy. To it we pass a randomly initialized model that " -"will server as the global model to federated. Note that the value of " -"``fraction_fit`` is read from the run config. You can find the default " -"value defined in the ``pyproject.toml``." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:356 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system for an LLM." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:361 +#: ../../source/ref-faq.rst:9 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_hf_link|_ in the Flower GitHub repository. For a " -"comprehensive example of a federated fine-tuning of an LLM with Flower, " -"refer to the |flowertune|_ example in the Flower GitHub repository." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. 
Here's a quickstart example:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-faq.rst:11 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:4 -msgid "Quickstart iOS" +#: ../../source/ref-faq.rst:12 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:9 -msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-faq.rst:16 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 -msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:20 +#: ../../source/ref-faq.rst:20 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
+"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:26 +#: ../../source/ref-faq.rst:22 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"`Android Kotlin example `_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:33 -msgid "Or Poetry:" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:40 -#: ../../source/tutorial-quickstart-scikitlearn.rst:43 -#: ../../source/tutorial-quickstart-xgboost.rst:65 -msgid "Flower Client" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:42 +#: ../../source/ref-faq.rst:27 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:80 -msgid "" -"Let's create a new application project in Xcode and add ``flwr`` as a " -"dependency in your project. For our application, we will store the logic " -"of our app in ``FLiOSModel.swift`` and the UI elements in " -"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" -" quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:86 -msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:94 -msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" -" ``MLBatchProvider`` object. The preprocessing is done inside " -"``DataLoader.swift``." +#: ../../source/ref-faq.rst:31 +msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:112 +#: ../../source/ref-faq.rst:32 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -"``MLModelInspect``." +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:118 +#: ../../source/ref-faq.rst:33 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:133 +#: ../../source/ref-faq.rst:34 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function ``startFlwrGRPC``." +"`Flower meets KOSMoS `_." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:141 +#: ../../source/ref-faq.rst:35 msgid "" -"That's it for the client. We only have to implement ``Client`` or call " -"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" -" ``hostname`` and ``port`` tells the client which server to connect to. " -"This can be done by entering the hostname and port in the application " -"before clicking the start button to start the federated learning process." -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:148 -#: ../../source/tutorial-quickstart-scikitlearn.rst:179 -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "Flower Server" +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:150 +#: ../../source/ref-faq.rst:36 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -"``server.py``, import Flower and start the server:" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:254 -msgid "Train the model, federated!" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:163 -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-telemetry.md:3 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:171 +#: ../../source/ref-telemetry.md:5 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:177 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in ``examples/ios``." +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:4 -msgid "Quickstart JAX" -msgstr "" - -#: ../../source/tutorial-quickstart-jax.rst:9 +#: ../../source/ref-telemetry.md:11 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." 
+"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:20 +#: ../../source/ref-telemetry.md:12 msgid "" -"Before we start building our JAX example, we need install the packages " -"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" -msgstr "" - -#: ../../source/tutorial-quickstart-jax.rst:28 -msgid "Linear Regression with JAX" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:30 +#: ../../source/ref-telemetry.md:13 msgid "" -"We begin with a brief description of the centralized training code based " -"on a ``Linear Regression`` model. If you want a more in-depth explanation" -" of what's going on then have a look at the official `JAX documentation " -"`_." +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:34 +#: ../../source/ref-telemetry.md:14 msgid "" -"Let's create a new file called ``jax_training.py`` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " -"imported. In addition, we need to import ``sklearn`` since we use " -"``make_regression`` for the dataset and ``train_test_split`` to split the" -" dataset into a training and test set. You can see that we do not yet " -"import the ``flwr`` package for federated learning. This will be done " -"later." +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." 
msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:51 -msgid "The ``load_data()`` function loads the mentioned training and test sets." +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:63 +#: ../../source/ref-telemetry.md:18 msgid "" -"The model architecture (a very simple ``Linear Regression`` model) is " -"defined in ``load_model()``." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:73 +#: ../../source/ref-telemetry.md:24 msgid "" -"We now need to define the training (function ``train()``), which loops " -"over the training set and measures the loss (function ``loss_fn()``) for " -"each batch of training examples. The loss function is separate since JAX " -"takes derivatives with a ``grad()`` function (defined in the ``main()`` " -"function and called in ``train()``)." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:95 -msgid "" -"The evaluation of the model is defined in the function ``evaluation()``. " -"The function takes all test examples and measures the loss of the linear " -"regression model." 
+#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:107 +#: ../../source/ref-telemetry.md:30 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the ``jax.grad()`` function is defined in " -"``main()`` and passed to ``train()``." +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:126 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:132 +#: ../../source/ref-telemetry.md:34 msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." 
msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:137 -msgid "JAX meets Flower" +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:139 +#: ../../source/ref-telemetry.md:38 msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -"``jax_training.py`` for the *clients* that are connected to the *server*." -" The *server* sends model parameters to the clients. The *clients* run " -"the training and update the parameters. The updated parameters are sent " -"back to the *server*, which averages all received parameter updates. This" -" describes one round of the federated learning process, and we repeat " -"this for multiple rounds." +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:167 +#: ../../source/ref-telemetry.md:40 msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined JAX training in ``jax_training.py``. Our " -"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " -"update the parameters on our JAX model:" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." 
msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/ref-telemetry.md:42 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" -" than ``Client`` if you use a framework with good NumPy interoperability " -"(like JAX) because it avoids some of the boilerplate that would otherwise" -" be necessary. ``FlowerClient`` needs to implement four methods, two " -"methods for getting/setting model parameters, one method for training the" -" model, and one method for testing the model:" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:194 -msgid "``set_parameters (optional)``" +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:193 -msgid "transform parameters to NumPy ``ndarray``'s" +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. 
If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:203 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:208 -msgid "return the local loss to the server" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:210 +#: ../../source/ref-telemetry.md:52 msgid "" -"The challenging part is to transform the JAX model parameters from " -"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " -"`NumPyClient`." +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:213 +#: ../../source/ref-telemetry.md:58 msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``evaluate()`` previously defined in " -"``jax_training.py``. So what we really do here is we tell Flower through " -"our ``NumPyClient`` subclass which of our already defined functions to " -"call for training and evaluation. 
We included type annotations to give " -"you a better understanding of the data types that get passed around." +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:286 -msgid "Having defined the federation process, we can run it." +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:315 +#: ../../source/ref-telemetry.md:66 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:321 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:325 -msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:4 -msgid "Quickstart MLX" +#: ../../source/tutorial-quickstart-android.rst:11 +msgid "" +"The experimental Flower Android SDK is not compatible with the latest " +"version of Flower. Android support is currently being reworked and will " +"be released in 2025." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:6 +#: ../../source/tutorial-quickstart-android.rst:14 msgid "" -"In this federated learning tutorial we will learn how to train simple MLP" -" on MNIST using Flower and MLX. It is recommended to create a virtual " -"environment and run everything within a :doc:`virtualenv `." +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new Android SDK is released." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:10 +#: ../../source/tutorial-quickstart-android.rst:17 msgid "" -"Let's use `flwr new` to create a complete Flower+MLX project. It will " -"generate all the files needed to run, by default with the Simulation " -"Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:25 +#: ../../source/tutorial-quickstart-android.rst:19 msgid "" -"Then, run the command below. You will be prompted to select of the " -"available templates (choose ``MLX``), give a name to your project, and " -"type in your developer name:" +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:53 -msgid "To run the project do:" +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:102 +#: ../../source/tutorial-quickstart-fastai.rst:6 msgid "" -"You can also override the parameters defined in " -"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:116 -msgid "" -"We will use `Flower Datasets `_ to " -"easily download and partition the `MNIST` dataset. In this example you'll" -" make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets:" +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:157 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" -"We define the model as in the `centralized MLX example " -"`_, it's a " -"simple MLP:" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:180 -msgid "" -"We also define some utility functions to test our model and to iterate " -"over batches." +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +msgid "Next, activate your environment, then run:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:201 +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" -"The main changes we have to make to use `MLX` with `Flower` will be found" -" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " -"doesn't provide an easy way to convert the model parameters into a list " -"of ``np.array`` objects (the format we need for the serialization of the " -"messages to work)." +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." 
+" Let's run the project:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:206 -msgid "The way MLX stores its parameters is as follows:" +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-jax.rst:60 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-scikitlearn.rst:59 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +#: ../../source/tutorial-quickstart-xgboost.rst:492 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:219 +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-jax.rst:102 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-scikitlearn.rst:101 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +#: ../../source/tutorial-quickstart-xgboost.rst:537 msgid "" -"Therefore, to get our list of ``np.array`` objects, we need to extract " -"each array and convert them into a NumPy array:" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:228 +#: ../../source/tutorial-quickstart-fastai.rst:108 msgid "" -"For the ``set_params()`` function, we perform the reverse operation. We " -"receive a list of NumPy arrays and want to convert them into MLX " -"parameters. Therefore, we iterate through pairs of parameters and assign " -"them to the `weight` and `bias` keys of each layer dict:" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:243 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset:" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:259 -msgid "" -"Here, after updating the parameters, we perform the training as in the " -"centralized case, and return the new parameters." +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:262 -msgid "And for the ``evaluate()`` method of the client:" +#: ../../source/tutorial-quickstart-huggingface.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"We also begin by updating the parameters with the ones sent by the " -"server, and then we compute the loss and accuracy using the functions " -"defined above. In the constructor of the ``FlowerClient`` we instantiate " -"the `MLP` model as well as other components such as the optimizer." +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:277 -msgid "Putting everything together we have:" +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:331 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that " -"``context`` enables you to get access to hyperparemeters defined in " -"``pyproject.toml`` to configure the run. In this tutorial we access, " -"among other hyperparameters, the ``local-epochs`` setting to control the " -"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " -"method." +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:363 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-jax.rst:32 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:31 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" -"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " -"an identical signature to that of ``client_fn()``, but the return type is" -" `ServerAppComponents `_ as " -"opposed to `Client `_. In this example we use the " -"``FedAvg`` strategy." 
+"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:386 -#: ../../source/tutorial-quickstart-pytorch.rst:344 -#: ../../source/tutorial-quickstart-tensorflow.rst:266 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-jax.rst:46 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system." +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:390 -msgid "" -"Check the `source code `_ of the extended version of this tutorial in ``examples" -"/quickstart-mlx`` in the Flower GitHub repository." +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +#: ../../source/tutorial-quickstart-xgboost.rst:485 +msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." 
msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:4 -msgid "Quickstart Pandas" +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-jax.rst:110 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:9 -msgid "Let's build a federated analytics system using Pandas and Flower!" +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-jax.rst:114 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "The Data" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:11 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-jax.rst:128 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-scikitlearn.rst:138 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +msgid "The Model" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:6 +#: ../../source/tutorial-quickstart-huggingface.rst:173 msgid "" -"In this federated learning tutorial we will learn how to train a " -"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:11 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" -"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" -" generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:26 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``PyTorch``), give a name to your project, " -"and type in your developer name:" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:117 -msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create dataloaders with the data" -" that correspond to their data partition." 
+#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#: ../../source/tutorial-quickstart-xgboost.rst:149 +msgid "The ClientApp" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:152 +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" -"We defined a simple Convolutional Neural Network (CNN), but feel free to " -"replace it with a more sophisticated model if you'd like:" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:177 +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" -"In addition to defining the model architecture, we also include two " -"utility functions to perform both training (i.e. ``train()``) and " -"evaluation (i.e. ``test()``) using the above model. These functions " -"should look fairly familiar if you have some prior experience with " -"PyTorch. Note these functions do not have anything specific to Flower. " -"That being said, the training function will normally be called, as we'll " -"see later, from a Flower client passing its own data. 
In summary, your " -"clients can use standard training/testing functions to perform local " -"training or evaluation:" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" -"The main changes we have to make to use `PyTorch` with `Flower` will be " -"found in the ``get_weights()`` and ``set_weights()`` functions. In " -"``get_weights()`` PyTorch model parameters are extracted and represented " -"as a list of NumPy arrays. The ``set_weights()`` function that's the " -"oposite: given a list of NumPy arrays it applies them to an existing " -"PyTorch model. Doing this in fairly easy in PyTorch." +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:282 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " "`context` enables you to get access to hyperparemeters defined in your " "``pyproject.toml`` to configure the run. In this tutorial we access the " -"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " "will perform when running the ``fit()`` method. 
You could define " -"additioinal hyperparameters in ``pyproject.toml`` and access them here." +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:309 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-jax.rst:246 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-scikitlearn.rst:255 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#: ../../source/tutorial-quickstart-xgboost.rst:269 +msgid "The ServerApp" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will server as " -"the global model to federated. Note that the value of ``fraction_fit`` is" -" read from the run config. You can find the default value defined in the " -"``pyproject.toml``." +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:348 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" -"Check the `source code `_ of the extended version of this tutorial in " -"``examples/quickstart-pytorch`` in the Flower GitHub repository." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:354 -#: ../../source/tutorial-quickstart-tensorflow.rst:278 -msgid "Video tutorial" +"Congratulations! 
You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:358 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" -"The video shown below shows how to setup a PyTorch + Flower project using" -" our previously recommended APIs. A new video tutorial will be released " -"that shows the new APIs (as the content above does)" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 -msgid "Quickstart PyTorch Lightning" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 -msgid "" -"In this federated learning tutorial we will learn how to train an " -"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 -msgid "" -"This will create a new directory called `quickstart-pytorch-lightning` " -"containing the following files:" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"By default, Flower Simulation Engine will be started and it will create a" -" federation of 4 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." -" To run the project, do:" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 -msgid "" -"Each simulated `ClientApp` (two per round) will also log a summary of " -"their local training process. Expect this output to be similar to:" +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#: ../../source/tutorial-quickstart-ios.rst:11 msgid "" -"Check the `source code `_ of this tutorial in ``examples" -"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +"The experimental Flower iOS SDK is not compatible with the latest version" +" of Flower. iOS support is currently being reworked and will be released " +"in 2025." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/tutorial-quickstart-ios.rst:14 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new iOS SDK is released." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:4 -msgid "Quickstart scikit-learn" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"In this tutorial, we will learn how to train a ``Logistic Regression`` " -"model on MNIST using Flower and scikit-learn." +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/tutorial-quickstart-ios.rst:20 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. 
For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-ios.rst:25 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +#: ../../source/tutorial-quickstart-ios.rst:28 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +#: ../../source/tutorial-quickstart-ios.rst:34 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. 
You" +" can do this by using pip:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:30 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#: ../../source/tutorial-quickstart-ios.rst:41 +msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:36 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/tutorial-quickstart-ios.rst:48 +msgid "Flower Client" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#: ../../source/tutorial-quickstart-ios.rst:50 msgid "" "Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within ``utils.py``. The " -"``utils.py`` contains different functions defining all the machine " -"learning basics:" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -msgid "``get_model_parameters()``" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -msgid "``set_model_params()``" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:54 -msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "``set_initial_params()``" +#: ../../source/tutorial-quickstart-ios.rst:88 +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. 
For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#: ../../source/tutorial-quickstart-ios.rst:102 msgid "" -"Please check out ``utils.py`` `here " -"`_ for more details. The pre-defined functions are used in" -" the ``client.py`` and imported. The ``client.py`` also requires to " -"import several packages such as Flower and scikit-learn:" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +#: ../../source/tutorial-quickstart-ios.rst:120 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -"``FederatedDataset.load_partition()`` method loads the partitioned " -"training set for each partition ID defined in the ``--partition-id`` " -"argument." 
+"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#: ../../source/tutorial-quickstart-ios.rst:126 msgid "" -"Next, the logistic regression model is defined and initialized with " -"``utils.set_initial_params()``." +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"The Flower server interacts with clients through an interface called " -"``Client``. When the server selects a particular client for training, it " -"sends training instructions over the network. The client receives those " -"instructions and calls one of the ``Client`` methods to run your code " -"(i.e., to fit the logistic regression we defined earlier)." +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-ios.rst:149 msgid "" -"Flower provides a convenience class called ``NumPyClient`` which makes it" -" easier to implement the ``Client`` interface when your workload uses " -"scikit-learn. Implementing ``NumPyClient`` usually means defining the " -"following methods (``set_parameters`` is optional though):" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "return the model weight as a list of NumPy ndarrays" +"That's it for the client. 
We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 -msgid "``set_parameters`` (optional)" +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "Flower Server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#: ../../source/tutorial-quickstart-ios.rst:158 msgid "" -"update the local model weights with the parameters received from the " -"server" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:133 -msgid "is directly imported with ``utils.set_model_params()``" +#: ../../source/tutorial-quickstart-ios.rst:169 +msgid "Train the model, federated!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:135 -msgid "set the local model weights" +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "train the local model" +#: ../../source/tutorial-quickstart-ios.rst:179 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:137 -msgid "return the updated local model weights" +#: ../../source/tutorial-quickstart-ios.rst:185 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -msgid "test the local model" +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:141 -msgid "The methods can be implemented in the following way:" +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#: ../../source/tutorial-quickstart-jax.rst:6 msgid "" -"We can now create an instance of our class ``MnistClient`` and add one " -"line to actually run this client:" +"In this federated learning tutorial we will learn how to train a linear " +"regression model using Flower and `JAX " +"`_. It is recommended to create a " +"virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#: ../../source/tutorial-quickstart-jax.rst:11 msgid "" -"That's it for the client. We only have to implement ``Client`` or " -"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" -" client of type ``NumPyClient`` you'll need to first call its " -"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " -"which server to connect to. In our case we can run the server and the " -"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. 
If we " -"run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" pass to the client." +"Let's use ``flwr new`` to create a complete Flower+JAX project. It will " +"generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_. A random " +"regression dataset will be loaded from scikit-learn's |makeregression|_ " +"function." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:24 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``JAX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -msgid "``server.py``, import Flower and start the server:" +#: ../../source/tutorial-quickstart-jax.rst:116 +msgid "" +"This tutorial uses scikit-learn's |makeregression|_ function to generate " +"a random regression problem." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#: ../../source/tutorial-quickstart-jax.rst:130 msgid "" -"The number of federated learning rounds is set in ``fit_round()`` and the" -" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " -"is called after each federated learning round and gives you information " -"about loss and accuracy. Note that we also make use of Flower Datasets " -"here to load the test split of the MNIST dataset for server-side " -"evaluation." 
+"We defined a simple linear regression model to demonstrate how to create " +"a JAX model, but feel free to replace it with a more sophisticated JAX " +"model if you'd like, (such as with NN-based `Flax " +"`_):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:141 msgid "" -"The ``main`` contains the server-side parameter initialization " -"``utils.set_initial_params()`` as well as the aggregation strategy " -"``fl.server.strategy:FedAvg()``. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``evaluation()``) using the above model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +#: ../../source/tutorial-quickstart-jax.rst:172 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"The main changes we have to make to use JAX with Flower will be found in " +"the ``get_params()`` and ``set_params()`` functions. In ``get_params()``," +" JAX model parameters are extracted and represented as a list of NumPy " +"arrays. The ``set_params()`` function is the opposite: given a list of " +"NumPy arrays it applies them to an existing JAX model." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:264 -#: ../../source/tutorial-quickstart-xgboost.rst:598 +#: ../../source/tutorial-quickstart-jax.rst:180 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"The ``get_params()`` and ``set_params()`` functions here are conceptually" +" similar to the ``get_weights()`` and ``set_weights()`` functions that we" +" defined in the :doc:`QuickStart PyTorch ` " +"tutorial." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:271 -#: ../../source/tutorial-quickstart-xgboost.rst:605 -msgid "Open another terminal and start the second client:" +#: ../../source/tutorial-quickstart-jax.rst:227 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:277 -#: ../../source/tutorial-quickstart-xgboost.rst:611 +#: ../../source/tutorial-quickstart-jax.rst:248 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the ``FedAvg`` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. 
Note that the value of " +"``input_dim`` is read from the run config. You can find the default value" +" defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#: ../../source/tutorial-quickstart-jax.rst:276 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in ``examples/sklearn-logreg-" -"mnist``." +"learning system for JAX with Flower!" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-quickstart-jax.rst:281 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a CNN model on CIFAR-10." +"Check the source code of the extended version of this tutorial in " +"|quickstart_jax_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:4 -msgid "Quickstart TensorFlow" +#: ../../source/tutorial-quickstart-mlx.rst:4 +msgid "Quickstart MLX" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#: ../../source/tutorial-quickstart-mlx.rst:6 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " -"all, it is recommended to create a virtual environment and run everything" -" within a :doc:`virtualenv `." +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:11 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" -"Let's use `flwr new` to create a complete Flower+TensorFlow project. 
It " -"will generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " "`_. The " "dataset will be partitioned using Flower Dataset's `IidPartitioner " @@ -22250,666 +22373,1035 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:26 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``TensorFlow``), give a name to your project," -" and type in your developer name:" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" " to generate `num_partitions` partitions. You can choose `other " "partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create the ``NumPy`` arrays that" -" correspond to their data partition." 
+"api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:141 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" -"Next, we need a model. We defined a simple Convolutional Neural Network " -"(CNN), but feel free to replace it with a more sophisticated model if " -"you'd like:" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:170 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" -"With `TensorFlow`, we can use the built-in ``get_weights()`` and " -"``set_weights()`` functions, which simplifies the implementation with " -"`Flower`. The rest of the functionality in the ClientApp is directly " -"inspired by the centralized case. The ``fit()`` method in the client " -"trains the model using the local dataset. Similarly, the ``evaluate()`` " -"method is used to evaluate the model received on a held-out validation " -"set that the client might have:" +"We also define some utility functions to test our model and to iterate " +"over batches." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:203 +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparameters defined in your " -"``pyproject.toml`` to configure the run. For example, in this tutorial we" -" access the `local-epochs` setting to control the number of epochs a " -"``ClientApp`` will perform when running the ``fit()`` method, in addition" -" to `batch-size`. You could define additional hyperparameters in " -"``pyproject.toml`` and access them here." +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. 
Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:234 -msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will serve as " -"the global model to federate." +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_tf_link|_ in the Flower GitHub repository." +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:282 +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" -"The video shown below shows how to setup a TensorFlow + Flower project " -"using our previously recommended APIs. A new video tutorial will be " -"released that shows the new APIs (as the content above does)" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:4 -msgid "Quickstart XGBoost" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:13 -msgid "Federated XGBoost" +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:15 -msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:21 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 -msgid "Why federated XGBoost?" 
+#: ../../source/tutorial-quickstart-mlx.rst:277 +msgid "Putting everything together we have:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:31 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and ``xgboost`` package. 
We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"Congratulations! You've successfully built and run your first federated " +"learning system." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:46 -msgid "Environment Setup" +#: ../../source/tutorial-quickstart-mlx.rst:390 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:48 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:51 -msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 -msgid "" -"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" -" go ahead and install ``xgboost``:" +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:67 +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." 
+"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:71 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " -"and other related functions:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:99 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/tutorial-quickstart-pytorch.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:101 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:115 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" -"In this example, we split the dataset into 30 partitions with uniform " -"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " -"partition for the given client based on ``partition_id``:" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:135 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for ``xgboost`` package." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:149 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" -"The functions of ``train_test_split`` and " -"``transform_dataset_to_dmatrix`` are defined as below:" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. 
In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:190 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" -"The ``num_local_round`` represents the number of iterations for local " -"tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " -"metric." +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:195 -msgid "Flower client definition for XGBoost" +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:197 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define ``XgbClient`` class inherited from " -"``fl.client.Client``." 
+"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:219 +#: ../../source/tutorial-quickstart-pytorch.rst:348 msgid "" -"All required parameters defined above are passed to ``XgbClient``'s " -"constructor." +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:221 -msgid "" -"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " -"insides ``XgbClient`` class as follows." +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +msgid "Video tutorial" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:236 +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use ``get_parameters`` " -"and ``set_parameters`` to initialise model parameters for XGBoost. As a " -"result, let's return an empty tensor in ``get_parameters`` when it is " -"called by the server at the first round." +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:278 -msgid "" -"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " -"first set of trees. From the second round, we load the global model sent " -"from server to new build Booster object, and then update model weights on" -" local training data with function ``local_boost`` as follows:" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:298 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 msgid "" -"Given ``num_local_round``, we update trees by calling " -"``bst_input.update`` method. After training, the last " -"``N=num_local_round`` trees will be extracted to send to the server." +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:330 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" -"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" -" function to conduct evaluation on valid set. The AUC value will be " -"returned." +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:333 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" -"Now, we can create an instance of our class ``XgbClient`` and add one " -"line to actually run this client:" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. 
The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:350 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 msgid "" -"That's it for the client. We only have to implement ``Client`` and call " -"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " -"client which server to connect to. In our case we can run the server and " -"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" -" run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" point the client at." +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:360 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:364 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"In a file named ``server.py``, import Flower and FedXgbBagging from " -"``flwr.server.strategy``." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:367 -msgid "We first define a strategy for XGBoost bagging aggregation." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:401 +#: ../../source/tutorial-quickstart-scikitlearn.rst:6 msgid "" -"We use two clients for this example. An ``evaluate_metrics_aggregation`` " -"function is defined to collect and wighted average the AUC values from " -"clients. The ``config_func`` function is to return the current FL round " -"number to client's ``fit()`` and ``evaluate()`` methods." +"In this federated learning tutorial we will learn how to train a Logistic" +" Regression on MNIST using Flower and scikit-learn. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:406 -msgid "Then, we start the server:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+scikit-learn project. " +"It will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_ The dataset " +"will be partitioned using |flowerdatasets|_'s |iidpartitioner|_" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:418 -msgid "Tree-based bagging aggregation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``sklearn``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:420 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `MNIST `_ dataset. In this" +" example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. 
You can choose |otherpartitioners|_ " +"available in Flower Datasets. Each ``ClientApp`` will call this function " +"to create dataloaders with the data that correspond to their data " +"partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:422 +#: ../../source/tutorial-quickstart-scikitlearn.rst:140 msgid "" -"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " -"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " -"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " -"``evaluate`` methods as follows:" +"We define the |logisticregression|_ model from scikit-learn in the " +"``get_model()`` function:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:519 +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 msgid "" -"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " -"trees by calling ``aggregate()`` function:" +"To perform the training and evaluation, we will make use of the " +"``.fit()`` and ``.score()`` methods available in the " +"``LogisticRegression`` class." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:579 +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -"``_get_tree_nums``. Then, the fetched information will be aggregated. " -"After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"The main changes we have to make to use scikit-learn with Flower will be " +"found in the ``get_model_params()``, ``set_model_params()``, and " +"``set_initial_params()`` functions. In ``get_model_params()``, the " +"coefficients and intercept of the logistic regression model are extracted" +" and represented as a list of NumPy arrays. 
In ``set_model_params()``, " +"that's the opposite: given a list of NumPy arrays it applies them to an " +"existing ``LogisticRegression`` model. Finally, in " +"``set_initial_params()``, we initialize the model parameters based on the" +" MNIST dataset, which has 10 classes (corresponding to the 10 digits) and" +" 784 features (corresponding to the size of the MNIST image array, which " +"is 28 × 28). Doing this is fairly easy in scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:584 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." +"The rest of the functionality is directly inspired by the centralized " +"case:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:588 -msgid "Launch Federated XGBoost!" +#: ../../source/tutorial-quickstart-scikitlearn.rst:226 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"``context`` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:664 +#: ../../source/tutorial-quickstart-scikitlearn.rst:257 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in ``metrics_distributed``." -" One can see that the average AUC increases over FL rounds." 
+"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a zero-initialized model that will " +"server as the global model to be federated. Note that the values of " +"``num-server-rounds``, ``penalty``, and ``local-epochs`` are read from " +"the run config. You can find the default values defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:668 +#: ../../source/tutorial-quickstart-scikitlearn.rst:295 msgid "" -"The full `source code `_ for this example can be found in ``examples" -"/xgboost-quickstart``." +"Congratulations! You've successfully built and run your first federated " +"learning system in scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:673 -msgid "Comprehensive Federated XGBoost" +#: ../../source/tutorial-quickstart-scikitlearn.rst:300 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_sklearn_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:675 +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:685 -msgid "Cyclic training" +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:687 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 -msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:733 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" -"The customised ``ClientManager`` samples all available clients in each FL" -" round based on the order of connection to the server. Then, we define a " -"new strategy ``FedXgbCyclic`` in " -"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:775 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" -"Unlike the original ``FedAvg``, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding ``aggregate_fit``." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:778 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" -"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" -" ensure the clients to be sequentially selected given FL round:" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:840 -msgid "Customised data partitioning" +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:842 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" -"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " -"instantiate the data partitioner based on the given ``num_partitions`` " -"and ``partitioner_type``. Currently, we provide four supported " -"partitioner type to simulate the uniformity/non-uniformity in data " -"quantity (uniform, linear, square, exponential)." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:873 -msgid "Customised centralised/distributed evaluation" +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:875 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 msgid "" -"To facilitate centralised evaluation, we define a function in " -"``server_utils.py``:" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:907 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" -"This function returns a evaluation function which instantiates a " -"``Booster`` object and loads the global model weights to it. The " -"evaluation is conducted by calling ``eval_set()`` method, and the tested " -"AUC value is reported." +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:911 +#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the ``evaluate()`` method insides the " -"``XgbClient`` class in ``client_utils.py``." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:916 -msgid "Flower simulation" +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:918 -msgid "" -"We also provide an example code (``sim.py``) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." 
+#: ../../source/tutorial-quickstart-xgboost.rst:7 +msgid "XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:954 +#: ../../source/tutorial-quickstart-xgboost.rst:9 msgid "" -"After importing all required packages, we define a ``main()`` function to" -" perform the simulation process:" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1010 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a ``list``. After the simulation begins, the " -"clients won't need to pre-process their partitions again." +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1014 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/tutorial-quickstart-xgboost.rst:19 +msgid "Why Federated XGBoost?" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1065 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" -"After that, we start the simulation by calling " -"``fl.simulation.start_simulation``:" +"As the demand for data privacy and decentralized learning grows, there's " +"an increasing requirement to implement federated XGBoost systems for " +"specialised applications, like survival analysis and financial fraud " +"detection." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1085 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" -"One of key parameters for ``start_simulation`` is ``client_fn`` which " -"returns a function to construct a client. We define it as follows:" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data privacy" +" is paramount. Given the robustness and efficiency of XGBoost, combining " +"it with federated learning offers a promising solution for these specific" +" challenges." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1126 -msgid "Arguments parser" +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1128 +#: ../../source/tutorial-quickstart-xgboost.rst:33 msgid "" -"In ``utils.py``, we define the arguments parsers for clients, server and " -"simulation, allowing users to specify different experimental settings. " -"Let's first see the sever side:" +"In this tutorial, we learn how to train a federated XGBoost model on the " +"HIGGS dataset using Flower and the ``xgboost`` package to perform a " +"binary classification task. We use a simple example (`full code xgboost-" +"quickstart `_) to demonstrate how federated XGBoost works, and then we " +"dive into a more complex comprehensive example (`full code xgboost-" +"comprehensive `_) to run various experiments." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1175 +#: ../../source/tutorial-quickstart-xgboost.rst:42 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with ``--centralised-eval``, the sever " -"will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." 
+"It is recommended to create a virtual environment and run everything " +"within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1180 -msgid "Then, the argument parser on client side:" +#: ../../source/tutorial-quickstart-xgboost.rst:45 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1234 +#: ../../source/tutorial-quickstart-xgboost.rst:52 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting ``--centralised-eval``, as well as an option to perform " -"scaled learning rate based on the number of clients by setting " -"``--scaled-lr``." +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1239 -msgid "We also have an argument parser for simulation:" +#: ../../source/tutorial-quickstart-xgboost.rst:60 +msgid "The Configurations" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1317 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/tutorial-quickstart-xgboost.rst:62 +msgid "" +"We define all required configurations / hyper-parameters inside the " +"``pyproject.toml`` file:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1320 -msgid "Example commands" +#: ../../source/tutorial-quickstart-xgboost.rst:84 +msgid "" +"The ``local-epochs`` represents the number of iterations for local tree " +"boost. We use CPU for the training in default. One can assign it to a GPU" +" by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1322 +#: ../../source/tutorial-quickstart-xgboost.rst:91 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `HIGGS` dataset." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1329 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/tutorial-quickstart-xgboost.rst:105 +msgid "" +"In this example, we split the dataset into 20 partitions with uniform " +"distribution (`IidPartitioner `_)." +" Then, we load the partition for the given client based on " +"``partition_id``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1335 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/tutorial-quickstart-xgboost.rst:110 +msgid "" +"Subsequently, we train/test split using the given partition (client's " +"local data), and reformat data to DMatrix for the ``xgboost`` package." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#: ../../source/tutorial-quickstart-xgboost.rst:124 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" ``examples/xgboost-comprehensive``." +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/tutorial-quickstart-xgboost.rst:151 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Let's first see how we define " +"Flower client for XGBoost. We follow the general rule to define " +"``FlowerClient`` class inherited from ``fl.client.Client``." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/tutorial-quickstart-xgboost.rst:176 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and the Flower framework (`part 1 " -"`__) and we learned how strategies can be used to customize " -"the execution on both the server and the clients (`part 2 " -"`__)." +"All required parameters defined above are passed to ``FlowerClient``'s " +"constructor." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-xgboost.rst:178 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg using " -"the Flower framework, Flower Datasets, and PyTorch." +"Then, we override ``fit`` and ``evaluate`` methods insides " +"``FlowerClient`` class as follows." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/tutorial-quickstart-xgboost.rst:217 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Flower Discuss and the Flower Slack to connect, " -"ask questions, and get help: - `Join Flower Discuss " -"`__ We'd love to hear from you in the " -"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " -"Beginners``. - `Join Flower Slack `__ We'd " -"love to hear from you in the ``#introductions`` channel! If anything is " -"unclear, head over to the ``#questions`` channel." +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. 
From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``_local_boost`` as follows:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 -msgid "Let's build a new ``Strategy`` from scratch! 🌼" +#: ../../source/tutorial-quickstart-xgboost.rst:237 +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 -msgid "Preparation" +#: ../../source/tutorial-quickstart-xgboost.rst:265 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +#: ../../source/tutorial-quickstart-xgboost.rst:271 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"After the local training on clients, clients' model updates are sent to " +"the *server*, which aggregates them to produce a better model. Finally, " +"the *server* sends this improved model version back to each *client* to " +"complete a federated round." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 -msgid "Installing dependencies" +#: ../../source/tutorial-quickstart-xgboost.rst:275 +msgid "" +"In the file named ``server_app.py``, we define a strategy for XGBoost " +"bagging aggregation:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 -msgid "First, we install the necessary packages:" +#: ../../source/tutorial-quickstart-xgboost.rst:308 +msgid "" +"An ``evaluate_metrics_aggregation`` function is defined to collect and " +"wighted average the AUC values from clients. The ``config_func`` function" +" is to return the current FL round number to client's ``fit()`` and " +"``evaluate()`` methods." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +#: ../../source/tutorial-quickstart-xgboost.rst:313 +msgid "Tree-based Bagging Aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:315 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +#: ../../source/tutorial-quickstart-xgboost.rst:317 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:414 +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:474 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:479 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by serialisation, and sending the global model back to each " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:483 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:533 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``History (metrics, " +"distributed, evaluate)``. One can see that the average AUC increases over" +" FL rounds." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:547 +msgid "" +"Check the full `source code " +"`_ " +"for this example in ``examples/xgboost-quickstart`` in the Flower GitHub " +"repository." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:552 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:554 +msgid "" +"Now that you know how federated XGBoost works with Flower, it's time to " +"run some more comprehensive experiments by customising the experimental " +"settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised / distributed evaluation. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:562 +msgid "Cyclic Training" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:564 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:570 +msgid "To do this, we first customise a ``ClientManager`` in ``server_app.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:610 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to the " +"next client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:652 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:655 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Customised Data Partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In ``task.py``, we use the ``instantiate_fds`` function to instantiate " +"Flower Datasets and the data partitioner based on the given " +"``partitioner_type`` and ``num_partitions``. Currently, we provide four " +"supported partitioner type to simulate the uniformity/non-uniformity in " +"data quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:726 +msgid "Customised Centralised / Distributed Evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:728 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_app.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:759 +msgid "" +"This function returns an evaluation function, which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:763 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_app.py``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:768 +#, fuzzy +msgid "Arguments Explainer" +msgstr "Argumento de compilação" + +#: ../../source/tutorial-quickstart-xgboost.rst:770 +msgid "" +"We define all hyper-parameters under ``[tool.flwr.app.config]`` entry in " +"``pyproject.toml``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:799 +msgid "" +"On the server side, we allow user to specify training strategies / FL " +"rounds / participating clients / clients for evaluation, and evaluation " +"fashion. Note that with ``centralised-eval = true``, the sever will do " +"centralised evaluation and all functionalities for client evaluation will" +" be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:804 +msgid "" +"On the client side, we can define various options for client data " +"partitioning. Besides, clients also have an option to conduct evaluation " +"on centralised test set by setting ``centralised-eval = true``, as well " +"as an option to perform scaled learning rate based on the number of " +"clients by setting ``scaled-lr = true``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:810 +#, fuzzy +msgid "Example Commands" +msgstr "Exemplo" + +#: ../../source/tutorial-quickstart-xgboost.rst:812 +msgid "To run bagging aggregation for 5 rounds evaluated on centralised test set:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:818 +msgid "" +"To run cyclic training with linear partitioner type evaluated on " +"centralised test set:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:827 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive`` in the Flower GitHub repository." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:833 +msgid "Video Tutorial" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:837 +msgid "" +"The video shown below shows how to setup a XGBoost + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! 
If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: 
../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " "GPU > Save``). Note, however, that Google Colab is not always able to " "offer GPU acceleration. If you see an error related to GPU availability " "in one of the following sections, consider switching back to CPU-based " @@ -23812,7 +24304,6 @@ msgid "" msgstr "" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 msgid "Final remarks" msgstr "" @@ -24128,7 +24619,7 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"🧑‍🏫 This tutorial starts from zero and expects no familiarity with " "federated learning. Only a basic understanding of data science and Python" " programming is assumed." msgstr "" @@ -24147,12 +24638,12 @@ msgid "Let's get started!" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +msgid "Classical Machine Learning" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " +"Before we begin discussing federated learning, let us quickly recap how " "most machine learning works today." 
msgstr "" @@ -24164,7 +24655,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|ac0a9766e26044d6aea222a829859b20|" +msgid "|80152fa658904be08c849b4a594b76e1|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -24179,7 +24670,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgid "|35b60a1068f944ce937ac2988661aad5|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -24188,19 +24679,20 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"In practice, the training data we work with doesn't originate on the " +"machine we train the model on." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " +"This data gets created \"somewhere else\". For instance, the data can " +"originate on a smartphone by the user interacting with an app, a car " "collecting sensor data, a laptop receiving input via the keyboard, or a " "smart speaker listening to someone trying to sing a song." 
msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgid "|efead7f2c2224b60b7b42705004c15e6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -24216,7 +24708,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgid "|5421fee4e7ed450c903cbcd8a9d8a5d4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -24226,13 +24718,13 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" "So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"that has been used in the past was to collect all this data on a central " +"server. This server can be located somewhere in a data center, or " +"somewhere in the cloud." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgid "|811fcf35e9214bd5b4e613e41f7c0a27|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -24247,7 +24739,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgid "|e61d38b0948f4c07a7257755f3799b54|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -24260,14 +24752,14 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." 
+"This classical machine learning approach we've just seen can be used in " +"some cases. Great examples include categorizing holiday photos, or " +"analyzing web traffic. Cases, where all the data is naturally available " +"on a centralized server." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|5405ed430e4746e28b083b146fb71731|" +msgid "|e82c29351e2e480087c61b939eb7c041|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -24282,7 +24774,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|a389e87dab394eb48a8949aa2397687b|" +msgid "|21ca40f4fb1a405c89098fd1d24880a4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -24291,7 +24783,7 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 msgid "" -"There are many reasons why the classic centralized machine learning " +"There are many reasons why the classical centralized machine learning " "approach does not work for a large number of highly important real-world " "use cases. Those reasons include:" msgstr "" @@ -24304,9 +24796,9 @@ msgid "" "(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " "protect sensitive data from being moved. In fact, those regulations " "sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"users' data for machine learning training because those users live in " +"different parts of the world, and their data is governed by different " +"data protection regulations." 
msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 @@ -24338,21 +24830,21 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 msgid "" "Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"detection models." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 msgid "" "Financial information from different organizations to detect financial " -"fraud" +"fraud." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +msgid "Location data from your electric car to make better range prediction." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +msgid "End-to-end encrypted messages to train better auto-complete models." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 @@ -24367,42 +24859,42 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +msgid "Federated Learning" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 msgid "" -"Federated learning simply reverses this approach. It enables machine " +"Federated Learning simply reverses this approach. It enables machine " "learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"of moving the data to the training. 
Here's a one-liner explanation:" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +msgid "Centralized machine learning: move the data to the computation" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +msgid "Federated (machine) Learning: move the computation to the data" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"By doing so, Federated Learning enables us to use machine learning (and " +"other data science approaches) in areas where it wasn't possible before. " +"We can now train excellent medical AI models by enabling different " +"hospitals to work together. We can solve financial fraud by training AI " +"models on the data of different financial institutions. We can build " +"novel privacy-enhancing applications (such as secure messaging) that have" +" better built-in AI than their non-privacy-enhancing alternatives. And " +"those are just a few of the examples that come to mind. 
As we deploy " +"Federated Learning, we discover more and more areas that can suddenly be " +"reinvented because they now have access to vast amounts of previously " +"inaccessible data." msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 msgid "" -"So how does federated learning work, exactly? Let's start with an " +"So how does Federated Learning work, exactly? Let's start with an " "intuitive explanation." msgstr "" @@ -24422,7 +24914,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|89c412136a5146ec8dc32c0973729f12|" +msgid "|1351a2629c2c46d981b13b19f9fa45f0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -24439,14 +24931,14 @@ msgstr "" msgid "" "Next, we send the parameters of the global model to the connected client " "nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"organizations). This is to ensure that each participating node starts its" +" local training using the same model parameters. We often use only a few " +"of the connected nodes instead of all nodes. The reason for this is that " +"selecting more and more client nodes has diminishing returns." 
msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgid "|124c2c188b994c7ab1c862cfdb326923|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -24470,7 +24962,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgid "|42e1951c36f2406e93c7ae0ec5b299f9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -24493,7 +24985,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgid "|ec637b8a84234d068995ee1ccb2dd3b1|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -24518,7 +25010,7 @@ msgid "" "In order to get one single model, we have to combine all the model " "updates we received from the client nodes. This process is called " "*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +" way is called *Federated Averaging* (`McMahan et al., 2016 " "`__), often abbreviated as *FedAvg*. " "*FedAvg* takes the 100 model updates and, as the name suggests, averages " "them. 
To be more precise, it takes the *weighted average* of the model " @@ -24531,7 +25023,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgid "|5bceb9d16b1a4d2db18d8a5b2f0cacb3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -24583,7 +25075,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" +msgid "Federated Analytics" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 @@ -24613,8154 +25105,12399 @@ msgstr "" msgid "Flower" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|502b10044e864ca2b15282a393ab7faf|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final Remarks" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you want have to do it. " +#~ "Usually it should be enough to " +#~ "install Docker on your system and " +#~ "ensure its available on your command " +#~ "line. Additionally, install the `VSCode " +#~ "Containers Extension `_." 
+#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " +#~ "(without extras)" +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" +#~ " [\"simulation\"] }`` (with extras)" +#~ msgstr "" + +#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgstr "" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.7.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" + +#~ msgid "Before the release" +#~ msgstr "" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." 
+#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. 
If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." 
+#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. 
If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." 
+#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. 
If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Source: `Official VSCode documentation " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Developing inside a Container " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Remote development in Containers " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." +#~ msgstr "" + +#~ msgid "" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignes," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" 
+#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_." +#~ msgstr "" + +#~ msgid "" +#~ "Git is a distributed version control " +#~ "tool. This allows for an entire " +#~ "codebase's history to be stored and " +#~ "every developer's machine. It is a " +#~ "software that will need to be " +#~ "installed on your local machine, you " +#~ "can follow this `guide " +#~ "`_ to set it up." +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "https://github.com/adap/flower (while connected to" +#~ " your GitHub account) and click the" +#~ " ``Fork`` button situated on the top" +#~ " right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "Now we will add an upstream " +#~ "address to our repository. Still in " +#~ "the same directroy, we must run " +#~ "the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by following " +#~ "this `getting started guide for " +#~ "contributors`_ (note that you won't need" +#~ " to clone the repository). Once you" +#~ " are able to write code and " +#~ "test it, you can finally start " +#~ "making changes!" +#~ msgstr "" + +#~ msgid "" +#~ "For our documentation, we’ve started to" +#~ " use the `Diàtaxis framework " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Our “How to” guides should have " +#~ "titles that continue the sencence “How" +#~ " to …”, for example, “How to " +#~ "upgrade to Flower 1.0”." +#~ msgstr "" + +#~ msgid "" +#~ "This issue is about changing the " +#~ "title of a doc from present " +#~ "continious to present simple." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take the example of “Saving " +#~ "Progress” which we changed to “Save " +#~ "Progress”. Does this pass our check?" 
+#~ msgstr "" + +#~ msgid "Before: ”How to saving progress” ❌" +#~ msgstr "" + +#~ msgid "After: ”How to save progress” ✅" +#~ msgstr "" + +#~ msgid "" +#~ "This is a tiny change, but it’ll" +#~ " allow us to test your end-" +#~ "to-end setup. After cloning and " +#~ "setting up the Flower repo, here’s " +#~ "what you should do:" +#~ msgstr "" + +#~ msgid "" +#~ "Build the docs and check the " +#~ "result: ``_" +#~ msgstr "" + +#~ msgid "Here’s how to change the file name:" +#~ msgstr "" + +#~ msgid "" +#~ "Commit the changes (commit messages are" +#~ " always imperative: “Do something”, in " +#~ "this case “Change …”)" +#~ msgstr "" + +#~ msgid "" +#~ "`Good first contributions " +#~ "`_, where you should" +#~ " particularly look into the " +#~ ":code:`baselines` contributions." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload with `FedBN `_, a federated training strategy" +#~ " designed for non-iid data. We " +#~ "are using PyTorch to train a " +#~ "Convolutional Neural Network(with Batch " +#~ "Normalization layers) on the CIFAR-10 " +#~ "dataset. When applying FedBN, only few" +#~ " changes needed compared to `Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `_." +#~ msgstr "" + +#~ msgid "" +#~ "All files are revised based on " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. 
The " +#~ "only thing to do is modifying the" +#~ " file called :code:`cifar.py`, revised part" +#~ " is shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the sytstem consists of one " +#~ "server and two clients." +#~ msgstr "" + +#~ msgid "" +#~ "If you have read `Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`_, the following" +#~ " parts are easy to follow, onyl " +#~ ":code:`get_parameters` and :code:`set_parameters` " +#~ "function in :code:`client.py` needed to " +#~ "revise. If not, please read the " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. first." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." 
+#~ msgstr "" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." 
+#~ msgstr "" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" + +#~ msgid "A Closer Look" +#~ msgstr "" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" + +#~ msgid "Give It a Try" +#~ msgstr "" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" 
+#~ msgstr "" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "" + +#~ msgid "Differential privacy" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" + +#~ msgid "DP-FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" + +#~ msgid "Wrapper-based approach" +#~ msgstr "" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. 
Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" + +#~ msgid "Server-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." 
+#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." 
+#~ msgstr "" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr "" + +#~ msgid "Client-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan et al. \"Learning Differentially " +#~ "Private Recurrent Language Models.\" " +#~ "International Conference on Learning " +#~ "Representations (ICLR), 2017." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "Private Learning with Adaptive Clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems (NeurIPS), 2021." 
+#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by `implementing" +#~ " a custom strategy from scratch " +#~ "`_. Here's a nonsensical " +#~ "example that customizes :code:`FedAvg` by " +#~ "adding a custom ``\"hello\": \"world\"`` " +#~ "configuration key/value pair to the " +#~ "config dict of a *single client* " +#~ "(only the first client in the " +#~ "list, the other clients in this " +#~ "round to not receive this \"special\"" +#~ " config value):" +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_evaluate`." +#~ msgstr "" + +#~ msgid "" +#~ "`How to run Flower using Docker " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." 
+#~ msgstr "" + +#~ msgid "driver" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." +#~ msgstr "" + +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. 
The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." +#~ msgstr "" + +#~ msgid "" +#~ "The first preview release of Flower " +#~ "Baselines has arrived! We're kickstarting " +#~ "Flower Baselines with implementations of " +#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," +#~ " and FedAvgM. Check the documentation " +#~ "on how to use [Flower " +#~ "Baselines](https://flower.ai/docs/using-baselines.html). " +#~ "With this first preview release we're" +#~ " also inviting the community to " +#~ "[contribute their own " +#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" + +#~ msgid "The following examples are available as standalone projects." +#~ msgstr "" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart TensorFlow (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart PyTorch (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`PyTorch: From Centralized To Federated " +#~ "(Tutorial) `_" +#~ msgstr "" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" + +#~ msgid "Extra Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. 
The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" + +#~ msgid "For PyTorch examples::" +#~ msgstr "" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" + +#~ msgid "PyTorch Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" + +#~ msgid "First, start a Flower server:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." 
+#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "" + +#~ msgid "TensorFlow Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" + +#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgstr "" + +#~ msgid "" +#~ "`Flower meets KOSMoS `_." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the full code example: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." 
+#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this `virtualenv `_." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a `virtualenv " +#~ "`_." +#~ msgstr "" + +#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML `_, a popular" +#~ " image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The utility :code:`utils.load_mnist()` downloads " +#~ "the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. 
In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "``BASE_IMAGE_TAG``" + +#~ msgid "The image tag of the base image." +#~ msgstr "A tag da imagem da imagem base." 
+ +#~ msgid "" +#~ "Open the notebook ``doc/source/tutorial-" +#~ "get-started-with-flower-pytorch.ipynb``:" +#~ msgstr "" + +#~ msgid "" +#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +#~ "/tutorial-get-started-with-flower-" +#~ "pytorch.ipynb" +#~ msgstr "" + +#~ msgid "" +#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" +#~ "name/doc/source/tutorial-get-started-with-" +#~ "flower-pytorch.ipynb" +#~ msgstr "" + +#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgstr "" + +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." +#~ msgstr "" + +#~ msgid "Open a PR (as shown above)" +#~ msgstr "" + +#~ msgid "How to write a good PR title" +#~ msgstr "" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "" + +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." 
+#~ msgstr "" + +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" + +#~ msgid "Implement Algorithm" +#~ msgstr "" + +#~ msgid "Database" +#~ msgstr "" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "" + +#~ msgid "Improve code in module" +#~ msgstr "" + +#~ msgid "Change SomeModule" +#~ msgstr "" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "" + +#~ msgid "Changelog entry" +#~ msgstr "" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." +#~ msgstr "" + +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "" + +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." +#~ msgstr "" + +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "" + +#~ msgid " is for classifying a PR as a general improvement." +#~ msgstr "" + +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "" + +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general examples change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "" + +#~ msgid "Note that only one token should be used." +#~ msgstr "" + +#~ msgid "" +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "" + +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." 
+#~ msgstr "" + +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr "" + +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. 
Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." +#~ msgstr "" + +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" + +#~ msgid "MXNet meets Flower" +#~ msgstr "" + +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" + +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." +#~ msgstr "" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. 
Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" + +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "" + +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." 
+#~ msgstr "" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "" + +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" +#~ msgstr "" + +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server can " +#~ "be started and how a Flower client" +#~ " can establish a secure connections " +#~ "to it." +#~ msgstr "" + +#~ msgid "" +#~ "The code example comes with a " +#~ "README.md file which will explain how" +#~ " to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how. Stick to " +#~ "this guide for a deeper introduction " +#~ "to the topic." +#~ msgstr "" + +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh`" +#~ msgstr "" + +#~ msgid "with the following command sequence:" +#~ msgstr "" + +#~ msgid "" +#~ "The approach how the SSL certificates" +#~ " are generated in this example can" +#~ " serve as an inspiration and starting" +#~ " point but should not be taken " +#~ "as complete for production environments. 
" +#~ "Please refer to other sources regarding" +#~ " the issue of correctly generating " +#~ "certificates for production environments." +#~ msgstr "" + +#~ msgid "" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "" + +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." +#~ msgstr "" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a client which uses the" +#~ " previously generated scripts:" +#~ msgstr "" + +#~ msgid "" +#~ "When setting :code:`root_certificates`, the " +#~ "client expects the PEM-encoded root " +#~ "certificates as a byte string. We " +#~ "are again using :code:`Path` to simplify" +#~ " reading those as byte strings." +#~ msgstr "" + +#~ msgid "" +#~ "You should now have learned how to" +#~ " generate self-signed certificates using" +#~ " the given script, start a SSL-" +#~ "enabled server, and have a client " +#~ "establish a secure connection to it." +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." +#~ msgstr "" + +#~ msgid "Flower server" +#~ msgstr "" + +#~ msgid "" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. 
" +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." +#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. 
If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." +#~ msgstr "" + +#~ msgid "" +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." +#~ msgstr "" + +#~ msgid "Using a different Flower or Python version" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgstr "" + +#~ msgid "" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " +#~ "` | :doc" +#~ ":`scikit-learn `" +#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ msgstr "" + +#~ msgid "flower-driver-api" +#~ msgstr "" + +#~ msgid "flower-fleet-api" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "Start a Flower Driver API server." 
+#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "" + +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." +#~ msgstr "" + +#~ msgid "start\\_driver" +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "The Driver object to use." +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Simulation Engine from the CLI." 
+#~ msgstr "" + +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "" + +#~ msgid "Quickstart MXNet" +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." +#~ msgstr "" + +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." +#~ msgstr "" + +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. 
We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." +#~ msgstr "" + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "" + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" + +#~ msgid "They can be implemented in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" + +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgstr "" + +#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`load_mnist()`" +#~ msgstr "" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "" + +#~ msgid ":code:`shuffle()`" +#~ msgstr "" + +#~ msgid "Shuffles data and its label" +#~ msgstr "" + +#~ msgid ":code:`partition()`" +#~ msgstr "" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy." +#~ msgstr "" + +#~ msgid "Let's get stated!" +#~ msgstr "" + +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. 
Each ``trainloader``/``valloader`` " +#~ "pair contains 4500 training examples and" +#~ " 500 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." +#~ msgstr "" + +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgstr "" + +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" + +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" + +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" + +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgstr "" + +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" + +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" + +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" + +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" + +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" + +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgstr "" + +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" + +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" + +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" + +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "Atualmente, Flower fornece duas imagens, " +#~ "uma imagem base e uma imagem de" +#~ " servidor. Também haverá uma imagem " +#~ "de cliente em breve. 
A imagem " +#~ "base, como o nome sugere, contém " +#~ "dependências básicas que tanto o " +#~ "servidor quanto o cliente precisam. Isso" +#~ " inclui dependências do sistema, Python " +#~ "e ferramentas Python. A imagem do " +#~ "servidor é baseada na imagem base, " +#~ "mas também instala o servidor Flower " +#~ "usando ``pip```." + +#~ msgid "``3.11``" +#~ msgstr "``3.11``" + +#~ msgid "Defaults to ``22.04``." +#~ msgstr "Como padrão ``22.04``." + +#~ msgid "Building the SuperLink image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "Pré-definido para ``flwr/server``." + +#~ msgid "The Python version of the base image." +#~ msgstr "O nome do repositório da imagem base." + +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "Como padrão ``22.04``." + +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." + +#~ msgid "The PyPI package to install." +#~ msgstr "" + +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Pré-definido para ``flwr/server``." + +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" +#~ "O nome da imagem é ``flwr_server`` " +#~ "e a tag ``0.1.0``. Lembre-se que" +#~ " os argumentos de compilação, bem " +#~ "como o nome e a tag podem " +#~ "ser adaptados às suas necessidades. " +#~ "Esses valores servem apenas como " +#~ "exemplos." + +#~ msgid "Creating New Messages" +#~ msgstr "Criando novas mensagens" + +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." 
+#~ msgstr "" + +#~ msgid "Server's side:" +#~ msgstr "" + +#~ msgid "Client's side:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "" + +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "" + +#~ msgid "" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "" + +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "" + +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "" + +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "" + +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." +#~ msgstr "" + +#~ msgid "The four functions:" +#~ msgstr "" + +#~ msgid "Sending the Message from the Server" +#~ msgstr "" + +#~ msgid "" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "" + +#~ msgid "Receiving the Message by the Client" +#~ msgstr "" + +#~ msgid "" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. 
Remember to " +#~ "use the serde functions!" +#~ msgstr "" + +#~ msgid "Within the handle function:" +#~ msgstr "" + +#~ msgid "And add a new function:" +#~ msgstr "" + +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." 
+#~ msgstr "" + +#~ msgid "" +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." +#~ msgstr "" + +#~ msgid "" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." +#~ msgstr "" + +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "" + +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower server (Fleet API)." 
+#~ msgstr "" + +#~ msgid "Unreleased" +#~ msgstr "" + +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "" + +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "" + +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "" + +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "" + +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "" + +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "" + +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "" + +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "" + +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "" + +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "" + +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "" + +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "" + +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "" + +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "" + +#~ msgid "Edge Client Engine" +#~ msgstr "Engine do Edge Client" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Edge " +#~ "Client" + +#~ msgid "Virtual Client Engine" +#~ msgstr "Engine do Virtual Client" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Virtual" +#~ " Client" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "" +#~ "Engine do Virtual Client e do Edge" +#~ " Client no mesma carga de trabalho" +#~ " (workload)" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com ambas engines do" +#~ " Virtual Client e do Edge Client" + +#~ msgid "Clone the flower repository." 
+#~ msgstr "Clone o repositório do flower." + +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ "Por favor, siga a primeira seção " +#~ "em :doc:`Execute o Flower usando Docker" +#~ " `" +#~ " que cobre este passo em mais " +#~ "detalhes." + +#~ msgid "``22.04``" +#~ msgstr "``23.0.1``" + +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" + +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.7.0``" + +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "" + +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "" +#~ "O exemplo a seguir cria uma imagem" +#~ " de servidor com a imagem base " +#~ "oficial do Flower py3.11-ubuntu22.04 e " +#~ "Flower 1.7.0:" + +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Versão da imagem Docker oficial do Ubuntu." + +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" + +#~ msgid "**Via the UI**" +#~ msgstr "" + +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" + +#~ msgid "Click on the **green** ``Run workflow`` button." 
+#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" +#~ msgstr "" + +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "" + +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" + +#~ msgid "Preliminarities" +#~ msgstr "" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "" + +#~ msgid "" +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verify if Flower was successfully " +#~ "installed. If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid ":doc:`How to run Flower using Docker `" +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" + +#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgstr "" + +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" + +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" + +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. 
This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" + +#~ msgid "Flower SuperLink" +#~ msgstr "" + +#~ msgid "Quickstart" +#~ msgstr "" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." 
+#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "" + +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "Enabling SSL for secure connections" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." 
+#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." +#~ msgstr "" + +#~ msgid "Flower SuperNode" +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." 
+#~ msgstr "" + +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" + +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" +#~ "Antes de começarmos, precisamos encontrar " +#~ "alguns pré-requisitos em nosso ambiente " +#~ "de desenvolvimento local." + +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" + +#~ msgid "Let's assume the following project layout:" +#~ msgstr "" + +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" + +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "" + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. 
Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "" +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." +#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" + +#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgstr "" + +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|55472eef61274ba1b739408607e109df|" -msgstr "" +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "``--insecure``: This option enables insecure communication." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" #~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you want have to do it. " -#~ "Usually it should be enough to " -#~ "install Docker on your system and " -#~ "ensure its available on your command " -#~ "line. Additionally, install the `VSCode " -#~ "Containers Extension `_." 
+#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " -#~ "(without extras)" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." +#~ msgstr "" + +#~ msgid "Flower ServerApp" #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" -#~ " [\"simulation\"] }`` (with extras)" +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." #~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." #~ msgstr "" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.7.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." #~ msgstr "" -#~ msgid "Before the release" +#~ msgid "Creating a ServerApp Dockerfile" #~ msgstr "" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. 
If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." #~ msgstr "" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." #~ msgstr "" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." #~ msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" + #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." 
+#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." #~ msgstr "" #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``. This will " -#~ "create a draft release on GitHub " -#~ "containing the correct artifacts and the" -#~ " relevant part of the changelog." +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "" + +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." #~ msgstr "" #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_ and examples " -#~ "of `good first contributions " -#~ "`_." +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." #~ msgstr "" #~ msgid "" -#~ "This will create a `flower/` (or " -#~ "the name of your fork if you " -#~ "renamed it) folder in the current " -#~ "working directory." 
+#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" #~ msgstr "" -#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." #~ msgstr "" #~ msgid "" -#~ "Once you click the `Compare & pull" -#~ " request` button, you should see " -#~ "something similar to this:" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" + +#~ msgid "Advanced Docker options" +#~ msgstr "" + +#~ msgid "Run with root user privileges" +#~ msgstr "" + +#~ msgid "" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." 
#~ msgstr "" -#~ msgid "Find the source file in `doc/source`" +#~ msgid "**Run a container with root user privileges**" #~ msgstr "" -#~ msgid "" -#~ "Make the change in the `.rst` file" -#~ " (beware, the dashes under the title" -#~ " should be the same length as " -#~ "the title itself)" +#~ msgid "**Run the build process with root user privileges**" #~ msgstr "" -#~ msgid "Change the file name to `save-progress.rst`" +#~ msgid "Using a different Flower version" #~ msgstr "" -#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgid "Pinning a Docker image to a specific version" #~ msgstr "" #~ msgid "" -#~ "This will cause a redirect from " -#~ "`saving-progress.html` to `save-progress.html`," -#~ " old links will continue to work." +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." #~ msgstr "" #~ msgid "" -#~ "For the lateral navigation bar to " -#~ "work properly, it is very important " -#~ "to update the `index.rst` file as " -#~ "well. This is where we define the" -#~ " whole arborescence of the navbar." +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" #~ msgstr "" -#~ msgid "Find and modify the file name in `index.rst`" +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" #~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "Setting environment variables" #~ msgstr "" -#~ msgid "`Python 3.7 `_ or above" +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." 
#~ msgstr "" #~ msgid "" -#~ "First, clone the `Flower repository " -#~ "`_ from GitHub::" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." #~ msgstr "" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and poetentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." 
#~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, you can use the following " -#~ "script that will install pyenv, set " -#~ "it up and create the virtual " -#~ "environment (with :code:`Python 3.8.17` by " -#~ "default)::" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`scikit-" +#~ "learn ` | " +#~ ":doc:`XGBoost ` |" +#~ " :doc:`Android ` " +#~ "| :doc:`iOS `" #~ msgstr "" -#~ msgid "" -#~ "Third, install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ msgid "flower-client-app" #~ msgstr "" -#~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "_`. Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ msgid ":py:obj:`flwr.client `\\" #~ msgstr "" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental, the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ msgid ":py:obj:`flwr.common `\\" #~ msgstr "" -#~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. Therefore, we use " -#~ "an adaptive approach [andrew]_ that " -#~ "continuously adjusts the clipping threshold" -#~ " to track a prespecified quantile of" -#~ " the update norm distribution." 
+#~ msgid ":py:obj:`flwr.server `\\" #~ msgstr "" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realises the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ msgid ":py:obj:`flwr.simulation `\\" #~ msgstr "" -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ msgid "Run Flower client app." #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. 
In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "McMahan, H. Brendan, et al. \"Learning" -#~ " differentially private recurrent language " -#~ "models.\" arXiv preprint arXiv:1710.06963 " -#~ "(2017)." +#~ msgid "Run Flower SuperNode." #~ msgstr "" -#~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "private learning with adaptive clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems 34 (2021): 17455-17466." +#~ msgid ":py:obj:`flwr.client.mod `\\" #~ msgstr "" -#~ msgid "" -#~ "The following command can be used " -#~ "to verfiy if Flower was successfully " -#~ "installed. If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ msgid ":py:obj:`Context `\\ \\(state\\)" #~ msgstr "" -#~ msgid "flwr (Python API reference)" +#~ msgid "State of your run." #~ msgstr "" -#~ msgid "start_client" +#~ msgid "Metrics record." 
#~ msgstr "" -#~ msgid "start_numpy_client" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" #~ msgstr "" -#~ msgid "start_simulation" +#~ msgid "Remove all items from R." #~ msgstr "" -#~ msgid "server.start_server" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "server.strategy" +#~ msgid "d defaults to None." #~ msgstr "" -#~ msgid "server.strategy.Strategy" +#~ msgid "Update R from dict/iterable E and F." #~ msgstr "" -#~ msgid "server.strategy.FedAvg" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" -#~ msgid "server.strategy.FedAvgM" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "server.strategy.FedMedian" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "server.strategy.QFedAvg" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" #~ msgstr "" -#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgstr "" -#~ msgid "server.strategy.FedOpt" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" -#~ msgid "server.strategy.FedProx" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "server.strategy.FedAdagrad" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." 
#~ msgstr "" -#~ msgid "server.strategy.FedAdam" +#~ msgid ":py:obj:`partition_id `\\" #~ msgstr "" -#~ msgid "server.strategy.FedYogi" +#~ msgid "An identifier telling which data partition a ClientApp should use." #~ msgstr "" -#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" #~ msgstr "" -#~ msgid "server.strategy.Krum" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgid "" +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. " +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgid "Run Flower server app." #~ msgstr "" -#~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Using the `client_fn`, Flower clients " -#~ "can interchangeably run as standalone " -#~ "processes (i.e. via `start_client`) or " -#~ "in simulation (i.e. via `start_simulation`)" -#~ " without requiring changes to how the" -#~ " client class is defined and " -#~ "instantiated. Calling `start_numpy_client` is " -#~ "now deprecated." 
+#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." #~ msgstr "" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384)), " -#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "**General updates to baselines** " -#~ "([#2301](https://github.com/adap/flower/pull/2301), " -#~ "[#2305](https://github.com/adap/flower/pull/2305), " -#~ "[#2307](https://github.com/adap/flower/pull/2307), " -#~ "[#2327](https://github.com/adap/flower/pull/2327), " -#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgid ":py:obj:`flwr.server.strategy `\\" #~ msgstr "" -#~ msgid "" -#~ "**General updates to the simulation " -#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " -#~ "[#2447](https://github.com/adap/flower/pull/2447), " -#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgid ":py:obj:`flwr.server.workflow `\\" #~ msgstr "" -#~ msgid "" -#~ "**General improvements** " -#~ "([#2309](https://github.com/adap/flower/pull/2309), " -#~ "[#2310](https://github.com/adap/flower/pull/2310), " -#~ "[2313](https://github.com/adap/flower/pull/2313), " -#~ "[#2316](https://github.com/adap/flower/pull/2316), " -#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -#~ " [#2360](https://github.com/adap/flower/pull/2360), " -#~ "[#2402](https://github.com/adap/flower/pull/2402), " -#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgid "run\\_driver\\_api" #~ msgstr "" -#~ msgid "" -#~ "`flower-superlink --driver-api-address " -#~ "\"0.0.0.0:8081\" --fleet-api-address " -#~ "\"0.0.0.0:8086\"`" +#~ msgid "run\\_fleet\\_api" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. 
The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ msgid "key shares." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." 
+#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" #~ msgid "" -#~ "Next, we install the necessary packages" -#~ " for PyTorch (``torch`` and " -#~ "``torchvision``) and Flower (``flwr``):" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." #~ msgstr "" #~ msgid "" -#~ "Federated learning can be applied to " -#~ "many different types of tasks across " -#~ "different domains. In this tutorial, we" -#~ " introduce federated learning by training" -#~ " a simple convolutional neural network " -#~ "(CNN) on the popular CIFAR-10 dataset." -#~ " CIFAR-10 can be used to train " -#~ "image classifiers that distinguish between " -#~ "images from ten different classes:" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) 
should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" #~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server:" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." #~ msgstr "" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." #~ msgstr "" -#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." #~ msgstr "" -#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." 
#~ msgstr "" -#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" #~ msgstr "" -#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" -#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." #~ msgstr "" -#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" #~ msgstr "" -#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgid "In addition, we define the device allocation in PyTorch with:" #~ msgstr "" -#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." #~ msgstr "" -#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." 
#~ msgstr "" -#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." #~ msgstr "" -#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" #~ msgstr "" -#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." #~ msgstr "" -#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ msgid "receive the updated local model weights" #~ msgstr "" -#~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." 
+#~ msgid "which can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" #~ msgstr "" #~ msgid "" -#~ "Source: `Official VSCode documentation " -#~ "`_" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" #~ msgid "" -#~ "`Developing inside a Container " -#~ "`_" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. 
the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" #~ msgstr "" #~ msgid "" -#~ "`Remote development in Containers " -#~ "`_" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" #~ msgid "" -#~ "If you are not familiar with " -#~ "Flower Baselines, you should probably " -#~ "check-out our `contributing guide for " -#~ "baselines `_." +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." #~ msgstr "" #~ msgid "" -#~ "You should then check out the open" -#~ " `issues " -#~ "`_" -#~ " for baseline requests. If you find" -#~ " a baseline that you'd like to " -#~ "work on and that has no assignes," -#~ " feel free to assign it to " -#~ "yourself and start working on it!" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
#~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_." +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." #~ msgstr "" #~ msgid "" -#~ "Git is a distributed version control " -#~ "tool. This allows for an entire " -#~ "codebase's history to be stored and " -#~ "every developer's machine. It is a " -#~ "software that will need to be " -#~ "installed on your local machine, you " -#~ "can follow this `guide " -#~ "`_ to set it up." +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." #~ msgstr "" #~ msgid "" -#~ "A fork is a personal copy of " -#~ "a GitHub repository. To create one " -#~ "for Flower, you must navigate to " -#~ "https://github.com/adap/flower (while connected to" -#~ " your GitHub account) and click the" -#~ " ``Fork`` button situated on the top" -#~ " right of the page." +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." #~ msgstr "" #~ msgid "" -#~ "Now we will add an upstream " -#~ "address to our repository. 
Still in " -#~ "the same directroy, we must run " -#~ "the following command:" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." #~ msgstr "" -#~ msgid "" -#~ "This can be achieved by following " -#~ "this `getting started guide for " -#~ "contributors`_ (note that you won't need" -#~ " to clone the repository). Once you" -#~ " are able to write code and " -#~ "test it, you can finally start " -#~ "making changes!" +#~ msgid "Let's build a new ``Strategy`` from scratch!" #~ msgstr "" #~ msgid "" -#~ "For our documentation, we’ve started to" -#~ " use the `Diàtaxis framework " -#~ "`_." +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." #~ msgstr "" #~ msgid "" -#~ "Our “How to” guides should have " -#~ "titles that continue the sencence “How" -#~ " to …”, for example, “How to " -#~ "upgrade to Flower 1.0”." +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" #~ msgstr "" #~ msgid "" -#~ "This issue is about changing the " -#~ "title of a doc from present " -#~ "continious to present simple." +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" 
#~ msgstr "" #~ msgid "" -#~ "Let's take the example of “Saving " -#~ "Progress” which we changed to “Save " -#~ "Progress”. Does this pass our check?" +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" #~ msgstr "" -#~ msgid "Before: ”How to saving progress” ❌" +#~ msgid "" +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" #~ msgstr "" -#~ msgid "After: ”How to save progress” ✅" +#~ msgid "" +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." #~ msgstr "" #~ msgid "" -#~ "This is a tiny change, but it’ll" -#~ " allow us to test your end-" -#~ "to-end setup. After cloning and " -#~ "setting up the Flower repo, here’s " -#~ "what you should do:" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." 
#~ msgstr "" #~ msgid "" -#~ "Build the docs and check the " -#~ "result: ``_" +#~ "`Check out Flower Code Examples " +#~ "`__" #~ msgstr "" -#~ msgid "Here’s how to change the file name:" +#~ msgid "" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" #~ msgstr "" #~ msgid "" -#~ "Commit the changes (commit messages are" -#~ " always imperative: “Do something”, in " -#~ "this case “Change …”)" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." #~ msgstr "" -#~ msgid "" -#~ "`Good first contributions " -#~ "`_, where you should" -#~ " particularly look into the " -#~ ":code:`baselines` contributions." +#~ msgid "Loading the data" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." #~ msgstr "" #~ msgid "" -#~ "Flower uses :code:`pyproject.toml` to manage" -#~ " dependencies and configure development " -#~ "tools (the ones which support it). " -#~ "Poetry is a build tool which " -#~ "supports `PEP 517 " -#~ "`_." 
+#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing machine learning" -#~ " workload with `FedBN `_, a federated training strategy" -#~ " designed for non-iid data. We " -#~ "are using PyTorch to train a " -#~ "Convolutional Neural Network(with Batch " -#~ "Normalization layers) on the CIFAR-10 " -#~ "dataset. When applying FedBN, only few" -#~ " changes needed compared to `Example: " -#~ "PyTorch - From Centralized To Federated" -#~ " `_." +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" #~ msgid "" -#~ "All files are revised based on " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. The " -#~ "only thing to do is modifying the" -#~ " file called :code:`cifar.py`, revised part" -#~ " is shown below:" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." 
#~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used PyTorch " -#~ "before. Let's take the next step " -#~ "and use what we've built to create" -#~ " a federated learning system within " -#~ "FedBN, the sytstem consists of one " -#~ "server and two clients." +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" #~ msgstr "" #~ msgid "" -#~ "If you have read `Example: PyTorch " -#~ "- From Centralized To Federated " -#~ "`_, the following" -#~ " parts are easy to follow, onyl " -#~ ":code:`get_parameters` and :code:`set_parameters` " -#~ "function in :code:`client.py` needed to " -#~ "revise. If not, please read the " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. first." +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." #~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgid "Defining the model" +#~ msgstr "" + +#~ msgid "Training the model" #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). 
This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" #~ msgstr "" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" #~ msgstr "" -#~ msgid "Ready... Set... Train!" +#~ msgid "Updating model parameters" #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." 
#~ msgstr "" #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" #~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgid "Implementing a Flower client" #~ msgstr "" #~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." -#~ msgstr "" - -#~ msgid "Now, let's see what is really happening inside." +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." #~ msgstr "" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. 
Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" #~ msgstr "" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" #~ msgstr "" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." #~ msgstr "" -#~ msgid "**sever_address**: String that identifies IP and port of the server." 
+#~ msgid "Using the Virtual Client Engine" #~ msgstr "" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." #~ msgstr "" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." 
+#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" #~ msgstr "" -#~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ msgid "Starting the training" #~ msgstr "" -#~ msgid "A Closer Look" +#~ msgid "" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." 
#~ msgstr "" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" #~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" #~ msgstr "" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." 
#~ msgstr "" #~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." #~ msgstr "" #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" #~ msgstr "" #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." +#~ msgstr "" + +#~ msgid "Let's move beyond FedAvg with Flower strategies!" #~ msgstr "" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" 
+#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" #~ msgstr "" -#~ msgid "Give It a Try" +#~ msgid "" +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." #~ msgstr "" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." #~ msgstr "" #~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" #~ msgstr "" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. 
Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" #~ msgstr "" -#~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" #~ msgstr "" -#~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" #~ msgstr "" -#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" #~ msgstr "" -#~ msgid "Differential privacy" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." 
+#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ msgid "DP-FedAvg" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" #~ msgstr "" -#~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" #~ msgstr "" -#~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ msgid "|032eb6fed6924ac387b9f13854919196|" #~ msgstr "" -#~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" #~ msgstr "" -#~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." 
+#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ msgid "Simplifying Assumptions" +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." +#~ msgid ":py:obj:`client `\\" #~ msgstr "" -#~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." +#~ msgid ":py:obj:`common `\\" #~ msgstr "" -#~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ msgid ":py:obj:`server `\\" #~ msgstr "" -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ msgid ":py:obj:`simulation `\\" #~ msgstr "" -#~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." 
+#~ msgid ":py:obj:`mod `\\" #~ msgstr "" -#~ msgid "Customizable Responsibility for Noise injection" +#~ msgid "run\\_client\\_app" #~ msgstr "" -#~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." +#~ msgid "run\\_supernode" #~ msgstr "" #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "Wrapper-based approach" +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." 
-#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "Server-side logic" +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." #~ msgstr "" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." 
+#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ msgid ":py:obj:`strategy `\\" #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. 
This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid ":py:obj:`workflow `\\" #~ msgstr "" -#~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ msgid "run\\_server\\_app" #~ msgstr "" -#~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ msgid "run\\_superlink" #~ msgstr "" #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." 
+#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" -#~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ msgid "Start a Ray-based Flower simulation server." #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." #~ msgstr "" -#~ msgid "Client-side logic" +#~ msgid "The total number of clients in this simulation." #~ msgstr "" #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. 
To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." #~ msgstr "" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." #~ msgstr "" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. 
In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." #~ msgstr "" #~ msgid "" -#~ "McMahan et al. \"Learning Differentially " -#~ "Private Recurrent Language Models.\" " -#~ "International Conference on Learning " -#~ "Representations (ICLR), 2017." +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "Private Learning with Adaptive Clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems (NeurIPS), 2021." +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" #~ msgstr "" -#~ msgid "" -#~ "This can be achieved by customizing " -#~ "an existing strategy or by `implementing" -#~ " a custom strategy from scratch " -#~ "`_. 
Here's a nonsensical " -#~ "example that customizes :code:`FedAvg` by " -#~ "adding a custom ``\"hello\": \"world\"`` " -#~ "configuration key/value pair to the " -#~ "config dict of a *single client* " -#~ "(only the first client in the " -#~ "list, the other clients in this " -#~ "round to not receive this \"special\"" -#~ " config value):" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_fit` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_fit`." +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_evaluate` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_evaluate`." +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." #~ msgstr "" #~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" #~ msgid "" -#~ "Ray Dashboard: ``_" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." 
#~ msgstr "" #~ msgid "" -#~ "Ray Metrics: ``_" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "**hist** -- Object containing metrics from training." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "Let's build a federated learning system using fastai and Flower!" #~ msgstr "" -#~ msgid "Flower driver SDK." +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" -#~ msgid "driver" +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. 
" +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" + +#~ msgid "Dependencies" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "Standard Hugging Face workflow" #~ msgstr "" -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "Handling the data" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" -#~ msgid "Get task results." +#~ msgid "Training and testing the model" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ msgid "Schedule tasks." 
+#~ msgid "Creating the model itself" #~ msgstr "" -#~ msgid "GrpcDriver" +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" -#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgid "Federating the example" #~ msgstr "" -#~ msgid "Connect to the Driver API." +#~ msgid "Creating the IMDBClient" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" -#~ msgstr "" - -#~ msgid "Request for run ID." +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." #~ msgstr "" -#~ msgid "Disconnect from the Driver API." +#~ msgid "Starting the server" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" #~ msgstr "" -#~ msgid "Get client IDs." 
+#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ msgid "Putting everything together" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ msgid "We can now start client instances using:" #~ msgstr "" #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." #~ msgstr "" #~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" #~ msgid "" -#~ "The first preview release of Flower " -#~ "Baselines has arrived! We're kickstarting " -#~ "Flower Baselines with implementations of " -#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," -#~ " and FedAvgM. Check the documentation " -#~ "on how to use [Flower " -#~ "Baselines](https://flower.ai/docs/using-baselines.html). " -#~ "With this first preview release we're" -#~ " also inviting the community to " -#~ "[contribute their own " -#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." 
+#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." #~ msgstr "" #~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" -#~ msgid "The following examples are available as standalone projects." +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" -#~ msgid "Quickstart TensorFlow/Keras" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" #~ msgid "" -#~ "`Quickstart TensorFlow (Tutorial) " -#~ "`_" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" #~ msgid "" -#~ "`Quickstart PyTorch (Tutorial) " -#~ "`_" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ msgid "" -#~ "`PyTorch: From Centralized To Federated " -#~ "(Tutorial) `_" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" #~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgid "Before Flower can be imported we have to install it:" #~ msgstr "" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." 
+#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" #~ msgstr "" -#~ msgid "Extra Dependencies" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" #~ msgstr "" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." -#~ msgstr "" - -#~ msgid "For PyTorch examples::" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." #~ msgstr "" -#~ msgid "For TensorFlow examples::" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" #~ msgstr "" -#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." #~ msgstr "" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." 
+#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" -#~ msgid "PyTorch Examples" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "CIFAR-10 Image Classification" +#~ msgid "Each client will have its own dataset." #~ msgstr "" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." -#~ msgstr "" - -#~ msgid "First, start a Flower server:" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. 
The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" #~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "TensorFlow Examples" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "Fashion-MNIST Image Classification" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. 
We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ msgid "" -#~ "`Flower meets KOSMoS `_." +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the full code example: " -#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" -#~ "huggingface](https://github.com/adap/flower/tree/main/examples" -#~ "/quickstart-huggingface)." +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" #~ msgstr "" -#~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." 
+#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" #~ msgstr "" -#~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" #~ msgstr "" -#~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." +#~ msgid "|163117eb654a4273babba413cf8065f5|" #~ msgstr "" -#~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" #~ msgstr "" -#~ msgid "" -#~ "It is recommended to create a " -#~ "virtual environment and run everything " -#~ "within this `virtualenv `_." +#~ msgid "|f403fcd69e4e44409627e748b404c086|" #~ msgstr "" -#~ msgid "" -#~ "First of all, it is recommended to" -#~ " create a virtual environment and run" -#~ " everything within a `virtualenv " -#~ "`_." +#~ msgid "|4b00fe63870145968f8443619a792a42|" #~ msgstr "" -#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgid "|368378731066486fa4397e89bc6b870c|" #~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML `_, a popular" -#~ " image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The utility :code:`utils.load_mnist()` downloads " -#~ "the training and test data. 
The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" #~ msgstr "" -#~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." -#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ msgid "|82324b9af72a4582a81839d55caab767|" #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "" +#~ "The Visual Studio Code Remote - " +#~ "Containers extension lets you use a " +#~ "Docker container as a fully-featured " +#~ "development environment. It allows you " +#~ "to open any folder inside (or " +#~ "mounted into) a container and take " +#~ "advantage of Visual Studio Code's full" +#~ " feature set. A :code:`devcontainer.json` " +#~ "file in your project tells VS Code" +#~ " how to access (or create) a " +#~ "development container with a well-" +#~ "defined tool and runtime stack. This " +#~ "container can be used to run an" +#~ " application or to separate tools, " +#~ "libraries, or runtimes needed for " +#~ "working with a codebase." #~ msgstr "" -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you don't have to do it. 
" +#~ "Usually it should be enough to " +#~ "install `Docker " +#~ "`_ on your " +#~ "system and ensure its available on " +#~ "your command line. Additionally, install " +#~ "the `VSCode Containers Extension " +#~ "`_." #~ msgstr "" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "" +#~ "If you prefer to use Anaconda for" +#~ " your virtual environment then install " +#~ "and setup the `conda " +#~ "`_ package. After setting" +#~ " it up you can create a virtual" +#~ " environment with:" #~ msgstr "" -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "The :code:`SecAgg+` abstraction" #~ msgstr "" -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "The :code:`LightSecAgg` abstraction" #~ msgstr "" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "``_ (while connected " +#~ "to your GitHub account) and click " +#~ "the ``Fork`` button situated on the " +#~ "top right of the page." #~ msgstr "" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "To check which files have been " +#~ "modified compared to the last version" +#~ " (last commit) and to see which " +#~ "files are staged for commit, you " +#~ "can use the :code:`git status` command." #~ msgstr "" -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgid "" +#~ "Once you have added all the files" +#~ " you wanted to commit using " +#~ ":code:`git add`, you can finally create" +#~ " your commit using this command:" #~ msgstr "" -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgid "" +#~ "The \\ is there to " +#~ "explain to others what the commit " +#~ "does. It should be written in an" +#~ " imperative style and be concise. An" +#~ " example would be :code:`git commit " +#~ "-m \"Add images to README\"`." 
#~ msgstr "" -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgid "" +#~ ":doc:`Good first contributions `, where you" +#~ " should particularly look into the " +#~ ":code:`baselines` contributions." #~ msgstr "" -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." #~ msgstr "" -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgid "" +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" #~ msgstr "" -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgid "" +#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" #~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "``BASE_IMAGE_TAG``" - -#~ msgid "The image tag of the base image." -#~ msgstr "A tag da imagem da imagem base." - #~ msgid "" -#~ "Open the notebook ``doc/source/tutorial-" -#~ "get-started-with-flower-pytorch.ipynb``:" +#~ "1. Clone the `Flower repository " +#~ "`_ from GitHub::" #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -#~ "/tutorial-get-started-with-flower-" -#~ "pytorch.ipynb" +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." 
#~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" -#~ "name/doc/source/tutorial-get-started-with-" -#~ "flower-pytorch.ipynb" +#~ "If you don't have :code:`pyenv` " +#~ "installed, the following script that " +#~ "will install it, set it up, and" +#~ " create the virtual environment (with " +#~ ":code:`Python 3.9.20` by default)::" #~ msgstr "" -#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgid "" +#~ "If you already have :code:`pyenv` " +#~ "installed (along with the :code:`pyenv-" +#~ "virtualenv` plugin), you can use the " +#~ "following convenience script (with " +#~ ":code:`Python 3.9.20` by default)::" #~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ "3. Install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" #~ msgstr "" -#~ msgid "Open a PR (as shown above)" +#~ msgid "" +#~ "The Flower repository contains a number" +#~ " of convenience scripts to make " +#~ "recurring development tasks easier and " +#~ "less error-prone. See the :code:`/dev`" +#~ " subdirectory for a full list. The" +#~ " following scripts are amongst the " +#~ "most important ones:" #~ msgstr "" -#~ msgid "How to write a good PR title" +#~ msgid "" +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" #~ msgstr "" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. 
Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "`_. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" #~ msgstr "" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "Flower uses Poetry to build releases." +#~ " The necessary command is wrapped in" +#~ " a simple script::" #~ msgstr "" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ "The resulting :code:`.whl` and :code:`.tar.gz`" +#~ " releases will be stored in the " +#~ ":code:`/dist` subdirectory." #~ msgstr "" -#~ msgid "Implement Algorithm" +#~ msgid "" +#~ "Flower's documentation uses `Sphinx " +#~ "`_. There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" #~ msgstr "" -#~ msgid "Database" +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" +#~ msgid "" +#~ "Change the application code. 
For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" #~ msgstr "" -#~ msgid "Improve code in module" +#~ msgid "" +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. The only thing" +#~ " to do is modifying the file " +#~ "called :code:`cifar.py`, revised part is " +#~ "shown below:" #~ msgstr "" -#~ msgid "Change SomeModule" +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only :code:`get_parameters`" +#~ " and :code:`set_parameters` function in " +#~ ":code:`client.py` needed to revise. If " +#~ "not, please read the :doc:`Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `. first." #~ msgstr "" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ "Our example consists of one *server* " +#~ "and two *clients*. In FedBN, " +#~ ":code:`server.py` keeps unchanged, we can " +#~ "start the server directly." #~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgid "" +#~ "Finally, we will revise our *client* " +#~ "logic by changing :code:`get_parameters` and" +#~ " :code:`set_parameters` in :code:`client.py`, we" +#~ " will exclude batch normalization " +#~ "parameters from model parameter list " +#~ "when sending to or receiving from " +#~ "the server." #~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgid "" +#~ "Let's create a new file called " +#~ ":code:`cifar.py` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as :code:`torch` " +#~ "and :code:`torchvision`) need to be " +#~ "imported. 
You can see that we do" +#~ " not import any package for federated" +#~ " learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." #~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgid "" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in :code:`class " +#~ "Net()`." #~ msgstr "" #~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" +#~ "The :code:`load_data()` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ ":code:`transform` normalized the data after" +#~ " loading." #~ msgstr "" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set, measures the " +#~ "loss, backpropagates it, and then takes" +#~ " one optimizer step for each batch" +#~ " of training examples." #~ msgstr "" -#~ msgid "Changelog entry" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function :code:`test()`. " +#~ "The function loops over all test " +#~ "samples and measures the loss of " +#~ "the model based on the test " +#~ "dataset." #~ msgstr "" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in " +#~ ":code:`cifar.py` for the *clients* that " +#~ "are connected to the *server*. The " +#~ "*server* sends model parameters to the" +#~ " clients. The *clients* run the " +#~ "training and update the parameters. 
The" +#~ " updated parameters are sent back to" +#~ " the *server* which averages all " +#~ "received parameter updates. This describes " +#~ "one round of the federated learning " +#~ "process and we repeat this for " +#~ "multiple rounds." #~ msgstr "" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ ":code:`server.py` first. The *server* needs" +#~ " to import the Flower package " +#~ ":code:`flwr`. Next, we use the " +#~ ":code:`start_server` function to start a " +#~ "server and tell it to perform " +#~ "three rounds of federated learning." #~ msgstr "" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined centralized " +#~ "training in :code:`cifar.py`. Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`torch` to update the parameters " +#~ "on our PyTorch model:" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`CifarClient`. 
:code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. " +#~ ":code:`CifarClient` needs to implement four" +#~ " methods, two methods for getting/setting" +#~ " model parameters, one method for " +#~ "training the model, and one method " +#~ "for testing the model:" #~ msgstr "" -#~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ msgid ":code:`set_parameters`" #~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ "loop over the list of model " +#~ "parameters received as NumPy :code:`ndarray`'s" +#~ " (think list of neural network " +#~ "layers)" #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid ":code:`get_parameters`" #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid "" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ ":code:`ndarray`'s (which is what " +#~ ":code:`flwr.client.NumPyClient` expects)" #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid ":code:`fit`" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid ":code:`evaluate`" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`cifar.py`. 
So what we really do" +#~ " here is we tell Flower through " +#~ "our :code:`NumPyClient` subclass which of " +#~ "our already defined functions to call" +#~ " for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_client()` by pointing it " +#~ "at the same IP address we used " +#~ "in :code:`server.py`:" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "" +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" #~ msgstr "" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ "The :code:`Strategy` abstraction provides a" +#~ " method called :code:`evaluate` that can" +#~ " directly be used to evaluate the " +#~ "current global model parameters. The " +#~ "current server implementation calls " +#~ ":code:`evaluate` after parameter aggregation " +#~ "and before federated evaluation (see " +#~ "next paragraph)." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ "Client-side evaluation happens in the" +#~ " :code:`Client.evaluate` method and can be" +#~ " configured from the server side." 
#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ ":code:`fraction_evaluate`: a :code:`float` defining" +#~ " the fraction of clients that will" +#~ " be selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1` and :code:`100` clients are " +#~ "connected to the server, then :code:`10`" +#~ " will be randomly selected for " +#~ "evaluation. If :code:`fraction_evaluate` is " +#~ "set to :code:`0.0`, federated evaluation " +#~ "will be disabled." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ ":code:`min_evaluate_clients`: an :code:`int`: the" +#~ " minimum number of clients to be " +#~ "selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1`, :code:`min_evaluate_clients` is set " +#~ "to 20, and :code:`100` clients are " +#~ "connected to the server, then :code:`20`" +#~ " clients will be selected for " +#~ "evaluation." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ ":code:`min_available_clients`: an :code:`int` that" +#~ " defines the minimum number of " +#~ "clients which need to be connected " +#~ "to the server before a round of" +#~ " federated evaluation can start. If " +#~ "fewer than :code:`min_available_clients` are " +#~ "connected to the server, the server " +#~ "will wait until more clients are " +#~ "connected before it continues to sample" +#~ " clients for evaluation." 
#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ ":code:`on_evaluate_config_fn`: a function that " +#~ "returns a configuration dictionary which " +#~ "will be sent to the selected " +#~ "clients. The function will be called " +#~ "during each round and provides a " +#~ "convenient way to customize client-side" +#~ " evaluation from the server side, for" +#~ " example, to configure the number of" +#~ " validation steps performed." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Model parameters can also be evaluated" +#~ " during training. :code:`Client.fit` can " +#~ "return arbitrary evaluation results as a" +#~ " dictionary:" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ "The same :code:`Strategy`-customization approach " +#~ "can be used to aggregate custom " +#~ "evaluation results coming from individual " +#~ "clients. Clients can return custom " +#~ "metrics to the server by returning " +#~ "a dictionary:" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. 
Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" +#~ msgstr "" + +#~ msgid "" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." #~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." #~ msgstr "" -#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgid "Enable node authentication in :code:`SuperNode`" #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." 
+#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." +#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" #~ msgstr "" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" -#~ msgstr "" - -#~ msgid "MNIST Training with MXNet" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." #~ msgstr "" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." #~ msgstr "" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like :code:`FedAvg`. 
" +#~ "Built-in strategies support so-called" +#~ " configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." #~ msgstr "" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter :code:`on_fit_config_fn`:" #~ msgstr "" -#~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ msgid "The :code:`FedAvg` strategy will call this function *every round*." #~ msgstr "" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. " +#~ "Here's a nonsensical example that " +#~ "customizes :code:`FedAvg` by adding a " +#~ "custom ``\"hello\": \"world\"`` configuration " +#~ "key/value pair to the config dict " +#~ "of a *single client* (only the " +#~ "first client in the list, the " +#~ "other clients in this round to not" +#~ " receive this \"special\" config value):" #~ msgstr "" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." +#~ "containing relevant information including: log" +#~ " message level (e.g. 
:code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." +#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" #~ msgstr "" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" #~ msgstr "" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" #~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." 
+#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." #~ msgstr "" -#~ msgid "MXNet meets Flower" +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server " +#~ "(:code:`SuperLink`) can be started and " +#~ "how a Flower client (:code:`SuperNode`) " +#~ "can establish a secure connections to" +#~ " it." #~ msgstr "" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "The code example comes with a " +#~ ":code:`README.md` file which explains how " +#~ "to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how it does " +#~ "so. Stick to this guide for a " +#~ "deeper introduction to the topic." #~ msgstr "" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. 
The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh` with the " +#~ "following command sequence:" #~ msgstr "" -#~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ msgid "" +#~ "This will generate the certificates in" +#~ " :code:`examples/advanced-tensorflow/.cache/certificates`." #~ msgstr "" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. 
:code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "When setting :code:`root_certificates`, the " +#~ "client expects a file path to " +#~ "PEM-encoded root certificates." #~ msgstr "" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgid "The :code:`Strategy` abstraction" #~ msgstr "" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "All strategy implementation are derived " +#~ "from the abstract base class " +#~ ":code:`flwr.server.strategy.Strategy`, both built-in" +#~ " implementations and third party " +#~ "implementations. This means that custom " +#~ "strategy implementations have the exact " +#~ "same capabilities at their disposal as" +#~ " built-in ones." #~ msgstr "" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "Creating a new strategy means " +#~ "implementing a new :code:`class` (derived " +#~ "from the abstract base class " +#~ ":code:`Strategy`) that implements for the " +#~ "previously shown abstract methods:" #~ msgstr "" -#~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." 
+#~ msgid "The :code:`initialize_parameters` method" #~ msgstr "" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ ":code:`initialize_parameters` is called only " +#~ "once, at the very beginning of an" +#~ " execution. It is responsible for " +#~ "providing the initial global model " +#~ "parameters in a serialized form (i.e.," +#~ " as a :code:`Parameters` object)." #~ msgstr "" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "Built-in strategies return user-provided" +#~ " initial parameters. The following example" +#~ " shows how initial parameters can be" +#~ " passed to :code:`FedAvg`:" #~ msgstr "" #~ msgid "" -#~ "This guide describes how to a " -#~ "SSL-enabled secure Flower server can " -#~ "be started and how a Flower client" -#~ " can establish a secure connections " -#~ "to it." +#~ "The Flower server will call " +#~ ":code:`initialize_parameters`, which either returns" +#~ " the parameters that were passed to" +#~ " :code:`initial_parameters`, or :code:`None`. If" +#~ " no parameters are returned from " +#~ ":code:`initialize_parameters` (i.e., :code:`None`), " +#~ "the server will randomly select one " +#~ "client and ask it to provide its" +#~ " parameters. This is a convenience " +#~ "feature and not recommended in practice," +#~ " but it can be useful for " +#~ "prototyping. In practice, it is " +#~ "recommended to always use server-side" +#~ " parameter initialization." 
#~ msgstr "" -#~ msgid "" -#~ "The code example comes with a " -#~ "README.md file which will explain how" -#~ " to start it. Although it is " -#~ "already SSL-enabled, it might be " -#~ "less descriptive on how. Stick to " -#~ "this guide for a deeper introduction " -#~ "to the topic." +#~ msgid "The :code:`configure_fit` method" #~ msgstr "" #~ msgid "" -#~ "Using SSL-enabled connections requires " -#~ "certificates to be passed to the " -#~ "server and client. For the purpose " -#~ "of this guide we are going to " -#~ "generate self-signed certificates. As " -#~ "this can become quite complex we " -#~ "are going to ask you to run " -#~ "the script in :code:`examples/advanced-" -#~ "tensorflow/certificates/generate.sh`" +#~ ":code:`configure_fit` is responsible for " +#~ "configuring the upcoming round of " +#~ "training. What does *configure* mean in" +#~ " this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_fit` makes this clear:" #~ msgstr "" -#~ msgid "with the following command sequence:" +#~ msgid "" +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_fit`:" #~ msgstr "" #~ msgid "" -#~ "The approach how the SSL certificates" -#~ " are generated in this example can" -#~ " serve as an inspiration and starting" -#~ " point but should not be taken " -#~ "as complete for production environments. " -#~ "Please refer to other sources regarding" -#~ " the issue of correctly generating " -#~ "certificates for production environments." 
+#~ "Use the :code:`client_manager` to randomly " +#~ "sample all (or a subset of) " +#~ "available clients (each represented as a" +#~ " :code:`ClientProxy` object)" #~ msgstr "" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`FitIns` holding the current " +#~ "global model :code:`parameters` and " +#~ ":code:`config` dict" #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to train, for example, different models" +#~ " on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." 
#~ msgstr "" -#~ msgid "" -#~ "We are now going to show how " -#~ "to write a client which uses the" -#~ " previously generated scripts:" +#~ msgid "The :code:`aggregate_fit` method" #~ msgstr "" #~ msgid "" -#~ "When setting :code:`root_certificates`, the " -#~ "client expects the PEM-encoded root " -#~ "certificates as a byte string. We " -#~ "are again using :code:`Path` to simplify" -#~ " reading those as byte strings." +#~ ":code:`aggregate_fit` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to train in :code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "You should now have learned how to" -#~ " generate self-signed certificates using" -#~ " the given script, start a SSL-" -#~ "enabled server, and have a client " -#~ "establish a secure connection to it." +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_fit`). :code:`aggregate_fit` " +#~ "therefore receives a list of " +#~ ":code:`results`, but also a list of " +#~ ":code:`failures`." #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ ":code:`aggregate_fit` returns an optional " +#~ ":code:`Parameters` object and a dictionary " +#~ "of aggregated metrics. The :code:`Parameters`" +#~ " return value is optional because " +#~ ":code:`aggregate_fit` might decide that the" +#~ " results provided are not sufficient " +#~ "for aggregation (e.g., too many " +#~ "failures)." #~ msgstr "" -#~ msgid "Flower server" +#~ msgid "The :code:`configure_evaluate` method" #~ msgstr "" #~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. 
" -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ ":code:`configure_evaluate` is responsible for " +#~ "configuring the upcoming round of " +#~ "evaluation. What does *configure* mean " +#~ "in this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_evaluate` makes this clear:" #~ msgstr "" #~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_evaluate`:" #~ msgstr "" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`EvaluateIns` holding the current" +#~ " global model :code:`parameters` and " +#~ ":code:`config` dict" #~ msgstr "" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. 
We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_evaluate`." #~ msgstr "" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to evaluate, for example, different " +#~ "models on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." #~ msgstr "" -#~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ msgid "The :code:`aggregate_evaluate` method" #~ msgstr "" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." 
+#~ ":code:`aggregate_evaluate` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to evaluate in :code:`configure_evaluate`." #~ msgstr "" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_evaluate`). " +#~ ":code:`aggregate_evaluate` therefore receives a " +#~ "list of :code:`results`, but also a " +#~ "list of :code:`failures`." #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ ":code:`aggregate_evaluate` returns an optional " +#~ ":code:`float` (loss) and a dictionary of" +#~ " aggregated metrics. The :code:`float` " +#~ "return value is optional because " +#~ ":code:`aggregate_evaluate` might decide that " +#~ "the results provided are not sufficient" +#~ " for aggregation (e.g., too many " +#~ "failures)." #~ msgstr "" -#~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ msgid "The :code:`evaluate` method" #~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid "" +#~ ":code:`evaluate` is responsible for evaluating" +#~ " model parameters on the server-side." 
+#~ " Having :code:`evaluate` in addition to " +#~ ":code:`configure_evaluate`/:code:`aggregate_evaluate` enables" +#~ " strategies to perform both servers-" +#~ "side and client-side (federated) " +#~ "evaluation." #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "The return value is again optional " +#~ "because the strategy might not need " +#~ "to implement server-side evaluation or" +#~ " because the user-defined :code:`evaluate`" +#~ " method might not complete successfully " +#~ "(e.g., it might fail to load the" +#~ " server-side evaluation data)." #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "Stable releases are available on `PyPI" +#~ " `_::" #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` should be " +#~ "installed with the ``simulation`` extra::" #~ msgstr "" #~ msgid "" -#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " -#~ "` | :doc:`🤗 " -#~ "Transformers ` " -#~ "| :doc:`JAX ` |" -#~ " :doc:`Pandas ` " -#~ "| :doc:`fastai `" -#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " -#~ "` | :doc" -#~ ":`scikit-learn `" -#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" #~ msgstr "" -#~ msgid "flower-driver-api" +#~ msgid "" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" #~ msgstr "" -#~ msgid "flower-fleet-api" +#~ msgid "or with ``mamba``::" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ 
"[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "New (possibly unstable) versions of " +#~ "Flower are sometimes available as " +#~ "pre-release versions (alpha, beta, release" +#~ " candidate) before the stable release " +#~ "happens::" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` pre-releases " +#~ "should be installed with the " +#~ "``simulation`` extra::" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "The latest (potentially unstable) changes " +#~ "in Flower are available as nightly " +#~ "releases::" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr-nightly`` should " +#~ "be installed with the ``simulation`` " +#~ "extra::" #~ msgstr "" -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid "You can look at everything at ``_ ." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port :code:`3000` on " +#~ "your machine as long as they are" +#~ " running." 
#~ msgstr "" -#~ msgid "Start a Flower Driver API server." +#~ msgid "" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ ":code:`client_num_gpus = 0.5` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ ":code:`client_num_gpus = 2`, the simulation" +#~ " wouldn't start (even if you had " +#~ "2 GPUs but decided to set 1 " +#~ "in :code:`ray_init_args`)." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to " +#~ "``_." #~ msgstr "" -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgid "" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "Ray Metrics: ``_" #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" #~ msgstr "" -#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." 
+#~ msgid "" +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." #~ msgstr "" #~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." #~ msgstr "" -#~ msgid "start\\_driver" +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." 
+#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." #~ msgstr "" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." #~ msgstr "" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." -#~ msgstr "" - -#~ msgid "The Driver object to use." -#~ msgstr "" - -#~ msgid "Starting a driver that connects to an insecure server:" +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. Before starting your " +#~ "multi-node simulation ensure that you:" #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. 
This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgid "" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." #~ msgstr "" -#~ msgid "Quickstart MXNet" +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." 
+#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" #~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." #~ msgstr "" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgid "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." #~ msgstr "" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. 
By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" #~ msgstr "" -#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgid "" +#~ "Model updates can be persisted on " +#~ "the server-side by customizing " +#~ ":code:`Strategy` methods. Implementing custom " +#~ "strategies is always an option, but " +#~ "for many cases it may be more " +#~ "convenient to simply customize an " +#~ "existing strategy. The following code " +#~ "example defines a new " +#~ ":code:`SaveModelStrategy` which customized the " +#~ "existing built-in :code:`FedAvg` strategy. " +#~ "In particular, it customizes " +#~ ":code:`aggregate_fit` by calling " +#~ ":code:`aggregate_fit` in the base class " +#~ "(:code:`FedAvg`). It then continues to " +#~ "save returned (aggregated) weights before " +#~ "it returns those aggregated weights to" +#~ " the caller (i.e., the server):" #~ msgstr "" #~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." #~ msgstr "" #~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." 
+#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." #~ msgstr "" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." #~ msgstr "" #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." 
+#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" #~ msgstr "" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" #~ msgstr "" -#~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" #~ msgstr "" #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "Flower allows full customization of the" +#~ " learning process through the " +#~ ":code:`Strategy` abstraction. A number of " +#~ "built-in strategies are provided in " +#~ "the core framework." #~ msgstr "" -#~ msgid "They can be implemented in the following way:" +#~ msgid "Use an existing strategy, for example, :code:`FedAvg`" #~ msgstr "" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ ":code:`start_server` function. 
It is usually" +#~ " recommended to adjust a few " +#~ "parameters during instantiation:" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "The server can pass new configuration" +#~ " values to the client each round " +#~ "by providing a function to " +#~ ":code:`on_fit_config_fn`. The provided function " +#~ "will be called by the strategy and" +#~ " must return a dictionary of " +#~ "configuration key values pairs that will" +#~ " be sent to the client. It must" +#~ " return a dictionary of arbitrary " +#~ "configuration values :code:`client.fit` and " +#~ ":code:`client.evaluate` functions during each " +#~ "round of federated learning." #~ msgstr "" #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and potentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." #~ msgstr "" #~ msgid "" -#~ "Congratulations! 
You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." -#~ msgstr "" - -#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ "Similar to :code:`on_fit_config_fn`, there is" +#~ " also :code:`on_evaluate_config_fn` to customize" +#~ " the configuration sent to " +#~ ":code:`client.evaluate()`" #~ msgstr "" -#~ msgid ":code:`load_mnist()`" +#~ msgid "" +#~ "Server-side evaluation can be enabled" +#~ " by passing an evaluation function to" +#~ " :code:`evaluate_fn`." #~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" -#~ msgid ":code:`shuffle()`" +#~ msgid "" +#~ "The following examples are available as" +#~ " standalone projects. Quickstart TensorFlow/Keras" +#~ " ---------------------------" #~ msgstr "" -#~ msgid "Shuffles data and its label" +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." 
#~ msgstr "" -#~ msgid ":code:`partition()`" +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" #~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." #~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" #~ msgid "" -#~ "The number of federated learning rounds" -#~ " is set in :code:`fit_round()` and " -#~ "the evaluation is defined in " -#~ ":code:`get_evaluate_fn()`. The evaluation function" -#~ " is called after each federated " -#~ "learning round and gives you information" -#~ " about loss and accuracy." 
+#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." #~ msgstr "" -#~ msgid "Let's get stated!" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." #~ msgstr "" #~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4500 training examples and" -#~ " 500 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" #~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." 
#~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "" +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " :code:`jax`, :code:`jaxlib`, :code:`scikit-" +#~ "learn`, and :code:`flwr`:" #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Linear Regression` model. " +#~ "If you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "" +#~ "Let's create a new file called " +#~ ":code:`jax_training.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) linear regression training. " +#~ "First, the JAX packages :code:`jax` and" +#~ " :code:`jaxlib` need to be imported. " +#~ "In addition, we need to import " +#~ ":code:`sklearn` since we use " +#~ ":code:`make_regression` for the dataset and" +#~ " :code:`train_test_split` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the :code:`flwr` package" +#~ " for federated learning. This will be" +#~ " done later." #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "mentioned training and test sets." #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "" +#~ "The model architecture (a very simple" +#~ " :code:`Linear Regression` model) is " +#~ "defined in :code:`load_model()`." #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`), which loops " +#~ "over the training set and measures " +#~ "the loss (function :code:`loss_fn()`) for " +#~ "each batch of training examples. 
The " +#~ "loss function is separate since JAX " +#~ "takes derivatives with a :code:`grad()` " +#~ "function (defined in the :code:`main()` " +#~ "function and called in :code:`train()`)." #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function :code:`evaluation()`." +#~ " The function takes all test examples" +#~ " and measures the loss of the " +#~ "linear regression model." #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the :code:`jax.grad()` " +#~ "function is defined in :code:`main()` " +#~ "and passed to :code:`train()`." #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`jax_training.py` for the" +#~ " *clients* that are connected to the" +#~ " *server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined JAX training" +#~ " in :code:`jax_training.py`. 
Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`jax` and :code:`jaxlib` to update" +#~ " the parameters on our JAX model:" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`FlowerClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. :code:`FlowerClient` needs to " +#~ "implement four methods, two methods for" +#~ " getting/setting model parameters, one " +#~ "method for training the model, and " +#~ "one method for testing the model:" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid ":code:`set_parameters (optional)`" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "transform parameters to NumPy :code:`ndarray`'s" #~ msgstr "" #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ ":code:`DeviceArray` to :code:`NumPy ndarray` " +#~ "to make them compatible with " +#~ "`NumPyClient`." #~ msgstr "" -#~ "Atualmente, Flower fornece duas imagens, " -#~ "uma imagem base e uma imagem de" -#~ " servidor. 
Também haverá uma imagem " -#~ "de cliente em breve. A imagem " -#~ "base, como o nome sugere, contém " -#~ "dependências básicas que tanto o " -#~ "servidor quanto o cliente precisam. Isso" -#~ " inclui dependências do sistema, Python " -#~ "e ferramentas Python. A imagem do " -#~ "servidor é baseada na imagem base, " -#~ "mas também instala o servidor Flower " -#~ "usando ``pip```." - -#~ msgid "``3.11``" -#~ msgstr "``3.11``" - -#~ msgid "Defaults to ``22.04``." -#~ msgstr "Como padrão ``22.04``." -#~ msgid "Building the SuperLink image" -#~ msgstr "Construindo a imagem do servidor" - -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "Pré-definido para ``flwr/server``." - -#~ msgid "The Python version of the base image." -#~ msgstr "O nome do repositório da imagem base." - -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "Como padrão ``22.04``." - -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." - -#~ msgid "The PyPI package to install." +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`evaluate()` previously defined in " +#~ ":code:`jax_training.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "Pré-definido para ``flwr/server``." - #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." 
+#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." #~ msgstr "" -#~ "O nome da imagem é ``flwr_server`` " -#~ "e a tag ``0.1.0``. Lembre-se que" -#~ " os argumentos de compilação, bem " -#~ "como o nome e a tag podem " -#~ "ser adaptados às suas necessidades. " -#~ "Esses valores servem apenas como " -#~ "exemplos." - -#~ msgid "Creating New Messages" -#~ msgstr "Criando novas mensagens" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" #~ msgstr "" -#~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." +#~ msgid ":code:`get_model_parameters()`" #~ msgstr "" -#~ msgid "Server's side:" +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" #~ msgstr "" -#~ msgid "Client's side:" +#~ msgid ":code:`set_model_params()`" #~ msgstr "" -#~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" #~ msgstr "" -#~ msgid "Message Types for Protocol Buffers" +#~ msgid ":code:`set_initial_params()`" #~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." 
-#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" #~ msgstr "" -#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." #~ msgstr "" -#~ msgid "Within the ClientMessage block:" +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." #~ msgstr "" #~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." 
#~ msgstr "" -#~ msgid "Once that is done, we will compile the file with:" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" #~ msgstr "" -#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgid ":code:`set_parameters` (optional)" #~ msgstr "" -#~ msgid "Serialization and Deserialization Functions" +#~ msgid "is directly imported with :code:`utils.set_model_params()`" #~ msgstr "" #~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" -#~ msgid "The four functions:" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
#~ msgstr "" -#~ msgid "Sending the Message from the Server" +#~ msgid ":code:`server.py`, import Flower and start the server:" #~ msgstr "" #~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." #~ msgstr "" -#~ msgid "Receiving the Message by the Client" +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." #~ msgstr "" #~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." 
#~ msgstr "" -#~ msgid "Within the handle function:" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" -#~ msgid "And add a new function:" +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" #~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__." +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``--volume``" -#~ " to mount the user's home directory" -#~ " (``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." 
+#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" #~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the user's home directory on your " -#~ "host system. If the file already " -#~ "exists, the SuperLink tries to restore" -#~ " the state from the file. To " -#~ "start the SuperLink with an empty " -#~ "database, simply remove the ``state.db`` " -#~ "file." +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. 
This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`__." +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" #~ msgstr "" #~ msgid "" -#~ "Here's another example to start with " -#~ "HTTPS. 
Use the ``--certificates`` command " -#~ "line argument to pass paths to (CA" -#~ " certificate, server certificate, and " -#~ "server private key)." +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." #~ msgstr "" -#~ msgid "Run Flower server (Driver API)." +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "Run Flower server (Fleet API)." +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." #~ msgstr "" -#~ msgid "Unreleased" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. 
The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" #~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" #~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." #~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." 
#~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" #~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." #~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." #~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." 
#~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." #~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." #~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ msgid "Edge Client Engine" -#~ msgstr "Engine do Edge Client" - #~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with Edge Client Engine" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" #~ msgstr "" -#~ "`Flower `_ arquitetura principal" -#~ " do framework com Engine do Edge " -#~ "Client" - -#~ msgid "Virtual Client Engine" -#~ msgstr "Engine do Virtual Client" #~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with Virtual Client Engine" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." 
#~ msgstr "" -#~ "`Flower `_ arquitetura principal" -#~ " do framework com Engine do Virtual" -#~ " Client" -#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" #~ msgstr "" -#~ "Engine do Virtual Client e do Edge" -#~ " Client no mesma carga de trabalho" -#~ " (workload)" #~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with both Virtual Client " -#~ "Engine and Edge Client Engine" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" -#~ "`Flower `_ arquitetura principal" -#~ " do framework com ambas engines do" -#~ " Virtual Client e do Edge Client" - -#~ msgid "Clone the flower repository." -#~ msgstr "Clone o repositório do flower." #~ msgid "" -#~ "Please follow the first section on " -#~ ":doc:`Run Flower using Docker ` which " -#~ "covers this step in more detail." +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" #~ msgstr "" -#~ "Por favor, siga a primeira seção " -#~ "em :doc:`Execute o Flower usando Docker" -#~ " `" -#~ " que cobre este passo em mais " -#~ "detalhes." - -#~ msgid "``22.04``" -#~ msgstr "``23.0.1``" - -#~ msgid "``23.0.1``" -#~ msgstr "``23.0.1``" - -#~ msgid "``69.0.2``" -#~ msgstr "``69.0.2``" -#~ msgid "``1.8.0``" -#~ msgstr "``1.7.0``" - -#~ msgid "Building the SuperLink/SuperNode or ServerApp image" -#~ msgstr "Construindo a imagem do servidor" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. 
Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" -#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." #~ msgstr "" #~ msgid "" -#~ "The following example creates a " -#~ "SuperLink/SuperNode or ServerApp image with" -#~ " the official Flower base image:" +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." #~ msgstr "" -#~ "O exemplo a seguir cria uma imagem" -#~ " de servidor com a imagem base " -#~ "oficial do Flower py3.11-ubuntu22.04 e " -#~ "Flower 1.7.0:" -#~ msgid "Trigger the CI for building the Docker images." -#~ msgstr "Versão da imagem Docker oficial do Ubuntu." +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" -#~ msgid "" -#~ "To trigger the workflow, a collaborator" -#~ " must create a ``workflow_dispatch`` event" -#~ " in the GitHub CI. This can be" -#~ " done either through the UI or " -#~ "via the GitHub CLI. The event " -#~ "requires only one input, the Flower " -#~ "version, to be released." +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" #~ msgstr "" -#~ msgid "**Via the UI**" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" #~ msgstr "" -#~ msgid "" -#~ "Go to the ``Build docker images`` " -#~ "workflow `page " -#~ "`_." +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" #~ msgstr "" -#~ msgid "" -#~ "Click on the ``Run workflow`` button " -#~ "and type the new version of Flower" -#~ " in the ``Version of Flower`` input" -#~ " field." 
+#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" #~ msgstr "" -#~ msgid "**Via the GitHub CI**" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ msgid "" -#~ "Make sure you are logged in via" -#~ " ``gh auth login`` and that the " -#~ "current working directory is the root" -#~ " of the Flower repository." +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" #~ msgstr "" -#~ msgid "" -#~ "Trigger the workflow via ``gh workflow" -#~ " run docker-images.yml -f flwr-" -#~ "version=``." +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ msgid "Preliminarities" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" #~ msgstr "" -#~ msgid "Example: JAX - Run JAX Federated" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "" -#~ "\\small\n" -#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ msgid "" -#~ "The following command can be used " -#~ "to verify if Flower was successfully " -#~ "installed. If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" #~ msgstr "" -#~ msgid ":doc:`How to run Flower using Docker `" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" #~ msgstr "" -#~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__. Supported " -#~ "architectures include ``amd64`` and " -#~ "``arm64v8``." 
+#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" #~ msgstr "" -#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "" -#~ "If you do not see the version " -#~ "of Docker but instead get an error" -#~ " saying that the command was not " -#~ "found, you will need to install " -#~ "Docker first. You can find installation" -#~ " instruction `here `_." +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ msgid "" -#~ "On Linux, Docker commands require " -#~ "``sudo`` privilege. If you want to " -#~ "avoid using ``sudo``, you can follow " -#~ "the `Post-installation steps " -#~ "`_" -#~ " on the official Docker website." +#~ msgid "|d62da263071d45a496f543e41fce3a19|" #~ msgstr "" -#~ msgid "" -#~ "To ensure optimal performance and " -#~ "compatibility, the SuperLink, SuperNode and" -#~ " ServerApp image must have the same" -#~ " version when running together. This " -#~ "guarantees seamless integration and avoids " -#~ "potential conflicts or issues that may" -#~ " arise from using different versions." +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ msgid "Flower SuperLink" +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" #~ msgstr "" -#~ msgid "Quickstart" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" #~ msgstr "" -#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" #~ msgstr "" -#~ msgid "" -#~ "The command pulls the Docker image " -#~ "with the tag ``1.8.0`` from Docker " -#~ "Hub. The tag specifies the Flower " -#~ "version. In this case, Flower 1.8.0. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ msgid "|e6ca84e1df244f238288a768352678e5|" #~ msgstr "" -#~ msgid "" -#~ "By default, the Flower SuperLink keeps" -#~ " state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. 
" -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ msgid "|39c2422082554a21963baffb33a0d057|" #~ msgstr "" -#~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower SuperLink. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" #~ msgstr "" -#~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `__ when " -#~ "deploying to a production environment." +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" #~ msgstr "" -#~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the SuperLink " -#~ "supports:" +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" #~ msgstr "" -#~ msgid "Mounting a volume to store the state on the host system" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a directory where you want " -#~ "to save the file on your host " -#~ "system and a name for the database" -#~ " file. By default, the SuperLink " -#~ "container runs with a non-root " -#~ "user called ``app`` with the user " -#~ "ID ``49999``. It is recommended to " -#~ "create new directory and change the " -#~ "user ID of the directory to " -#~ "``49999`` to ensure the mounted " -#~ "directory has the proper permissions. 
If" -#~ " you later want to delete the " -#~ "directory, you can change the user " -#~ "ID back to the current user ID " -#~ "by running ``sudo chown -R $USER:$(id" -#~ " -gn) state``." +#~ "If you don't have ``pyenv`` installed," +#~ " the following script that will " +#~ "install it, set it up, and create" +#~ " the virtual environment (with ``Python " +#~ "3.9.20`` by default):" #~ msgstr "" #~ msgid "" -#~ "In the example below, we create a" -#~ " new directory, change the user ID" -#~ " and tell Docker via the flag " -#~ "``--volume`` to mount the local " -#~ "``state`` directory into the ``/app/state``" -#~ " directory of the container. Furthermore," -#~ " we use the flag ``--database`` to" -#~ " specify the name of the database " -#~ "file." +#~ "If you already have ``pyenv`` installed" +#~ " (along with the ``pyenv-virtualenv`` " +#~ "plugin), you can use the following " +#~ "convenience script (with ``Python 3.9.20`` " +#~ "by default):" #~ msgstr "" -#~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the ``state`` directory on your host " -#~ "system. If the file already exists, " -#~ "the SuperLink tries to restore the " -#~ "state from the file. To start the" -#~ " SuperLink with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" #~ msgstr "" -#~ msgid "Enabling SSL for secure connections" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" #~ msgstr "" -#~ msgid "" -#~ "To enable SSL, you will need a " -#~ "PEM-encoded root certificate, a PEM-" -#~ "encoded private key and a PEM-" -#~ "encoded certificate chain." +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/certificates/`` directory of the " -#~ "container. 
This allows the SuperLink to" -#~ " access the files within the " -#~ "container. The ``ro`` stands for " -#~ "``read-only``. Docker volumes default to" -#~ " ``read-write``; that option tells " -#~ "Docker to make the volume ``read-" -#~ "only`` instead. Finally, we pass the " -#~ "names of the certificates and key " -#~ "file to the SuperLink with the " -#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " -#~ "and ``--ssl-keyfile`` flag." +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" #~ msgstr "" -#~ msgid "" -#~ "Because Flower containers, by default, " -#~ "run with a non-root user ``app``," -#~ " the mounted files and directories " -#~ "must have the proper permissions for " -#~ "the user ID ``49999``. For example, " -#~ "to change the user ID of all " -#~ "files in the ``certificates/`` directory, " -#~ "you can run ``sudo chown -R " -#~ "49999:49999 certificates/*``." +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ msgid "Flower SuperNode" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" #~ msgstr "" -#~ msgid "" -#~ "The SuperNode Docker image comes with" -#~ " a pre-installed version of Flower" -#~ " and serves as a base for " -#~ "building your own SuperNode image." +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" #~ msgstr "" -#~ msgid "" -#~ "The SuperNode Docker image currently " -#~ "works only with the 1.9.0-nightly " -#~ "release. A stable version will be " -#~ "available when Flower 1.9.0 (stable) " -#~ "gets released (ETA: May). A SuperNode" -#~ " nightly image must be paired with" -#~ " the corresponding SuperLink and ServerApp" -#~ " nightly images released on the same" -#~ " day. To ensure the versions are " -#~ "in sync, using the concrete tag, " -#~ "e.g., ``1.9.0.dev20240501`` instead of " -#~ "``nightly`` is recommended." 
+#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" + +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" #~ msgstr "" -#~ msgid "" -#~ "We will use the ``quickstart-pytorch``" -#~ " example, which you can find in " -#~ "the Flower repository, to illustrate how" -#~ " you can dockerize your ClientApp." +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" #~ msgstr "" -#~ msgid "" -#~ "Before we can start, we need to" -#~ " meet a few prerequisites in our " -#~ "local development environment. You can " -#~ "skip the first part if you want" -#~ " to run your ClientApp instead of " -#~ "the ``quickstart-pytorch`` example." +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" #~ msgstr "" -#~ "Antes de começarmos, precisamos encontrar " -#~ "alguns pré-requisitos em nosso ambiente " -#~ "de desenvolvimento local." -#~ msgid "Creating a SuperNode Dockerfile" +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ msgid "Let's assume the following project layout:" +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" #~ msgstr "" -#~ msgid "" -#~ "First, we need to create a " -#~ "``requirements.txt`` file in the directory " -#~ "where the ``ClientApp`` code is located." -#~ " In the file, we list all the" -#~ " dependencies that the ClientApp requires." +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" #~ msgid "" -#~ "Note that `flwr `__" -#~ " is already installed in the " -#~ "``flwr/supernode`` base image, so you " -#~ "only need to include other package " -#~ "dependencies in your ``requirements.txt``, " -#~ "such as ``torch``, ``tensorflow``, etc." +#~ "Include SecAgg, SecAgg+, and LightSecAgg " +#~ "protocol. The LightSecAgg protocol has " +#~ "not been implemented yet, so its " +#~ "diagram and abstraction may not be " +#~ "accurate in practice. The SecAgg " +#~ "protocol can be considered as a " +#~ "special case of the SecAgg+ protocol." #~ msgstr "" -#~ msgid "" -#~ "Next, we create a Dockerfile. 
If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.supernode`` in ``examples/quickstart-" -#~ "pytorch``." +#~ msgid "The ``SecAgg+`` abstraction" #~ msgstr "" #~ msgid "" -#~ "The ``Dockerfile.supernode`` contains the " -#~ "instructions that assemble the SuperNode " -#~ "image." +#~ "In this implementation, each client will" +#~ " be assigned with a unique index " +#~ "(int) for secure aggregation, and thus" +#~ " many python dictionaries used have " +#~ "keys of int type rather than " +#~ "ClientProxy type." #~ msgstr "" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the SuperNode " -#~ "image tagged ``nightly`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. Next, we install the" -#~ " ClientApp dependencies by copying the " -#~ "``requirements.txt`` file into the image " -#~ "and run ``pip install``. In the " -#~ "last two lines, we copy the " -#~ "``client.py`` module into the image and" -#~ " set the entry point to ``flower-" -#~ "client-app`` with the argument " -#~ "``client:app``. The argument is the " -#~ "object reference of the ClientApp " -#~ "(``:``) that will be run" -#~ " inside the ClientApp." +#~ "The Flower server will execute and " +#~ "process received results in the " +#~ "following order:" #~ msgstr "" -#~ msgid "Building the SuperNode Docker image" -#~ msgstr "Construindo a imagem do servidor" +#~ msgid "The ``LightSecAgg`` abstraction" +#~ msgstr "" -#~ msgid "" -#~ "Next, we build the SuperNode Docker " -#~ "image by running the following command" -#~ " in the directory where Dockerfile " -#~ "and ClientApp code are located." +#~ msgid "Types" #~ msgstr "" #~ msgid "" -#~ "We gave the image the name " -#~ "``flwr_supernode``, and the tag ``0.0.1``. " -#~ "Remember that the here chosen values " -#~ "only serve as an example. 
You can" -#~ " change them to your needs." +#~ "Docker Compose is `installed " +#~ "`_." #~ msgstr "" -#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgid "Build and start the services using the following command:" #~ msgstr "" -#~ msgid "Let's break down each part of this command:" +#~ msgid "Run the example:" +#~ msgstr "Exemplo" + +#~ msgid "Follow the logs of the SuperExec service:" #~ msgstr "" -#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgid "Only runs on AMD64." #~ msgstr "" #~ msgid "" -#~ "``--rm``: This option specifies that the" -#~ " container should be automatically removed" -#~ " when it stops." +#~ "Use the method that works best for" +#~ " you to copy the ``server`` " +#~ "directory, the certificates, and your " +#~ "Flower project to the remote machine." #~ msgstr "" -#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgid "" +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``server`` Docker Compose files." #~ msgstr "" -#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgid "" +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``client`` Docker Compose files." #~ msgstr "" #~ msgid "" -#~ "``--superlink 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ "The Path of the ``root-certificates``" +#~ " should be relative to the location" +#~ " of the ``pyproject.toml`` file." #~ msgstr "" -#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgid "To run the project, execute:" #~ msgstr "" -#~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your SuperNodes." 
+#~ msgid "Run the ``quickstart-docker`` project by executing the command:" #~ msgstr "" -#~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "SuperNode binary. To see all available" -#~ " flags that the SuperNode supports, " -#~ "run:" +#~ msgid "Follow the SuperExec logs to track the execution of the run:" #~ msgstr "" -#~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your SuperNode container." +#~ msgid "Execute the command to run the quickstart example:" #~ msgstr "" -#~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--root-certificates`` " -#~ "flag when starting the container." +#~ msgid "Monitor the SuperExec logs and wait for the summary to appear:" #~ msgstr "" -#~ msgid "Flower ServerApp" +#~ msgid "Example: FedBN in PyTorch - From Centralized To Federated" #~ msgstr "" #~ msgid "" -#~ "The procedure for building and running" -#~ " a ServerApp image is almost " -#~ "identical to the SuperNode image." +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload with `FedBN `_, a federated training strategy" +#~ " designed for non-iid data. We " +#~ "are using PyTorch to train a " +#~ "Convolutional Neural Network(with Batch " +#~ "Normalization layers) on the CIFAR-10 " +#~ "dataset. When applying FedBN, only few" +#~ " changes needed compared to :doc:`Example:" +#~ " PyTorch - From Centralized To " +#~ "Federated `." 
+#~ msgstr "" + +#~ msgid "Centralized Training" #~ msgstr "" #~ msgid "" -#~ "Similar to the SuperNode image, the " -#~ "ServerApp Docker image comes with a " -#~ "pre-installed version of Flower and " -#~ "serves as a base for building your" -#~ " own ServerApp image." +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. The only thing" +#~ " to do is modifying the file " +#~ "called ``cifar.py``, revised part is " +#~ "shown below:" #~ msgstr "" #~ msgid "" -#~ "We will use the same ``quickstart-" -#~ "pytorch`` example as we do in the" -#~ " Flower SuperNode section. If you " -#~ "have not already done so, please " -#~ "follow the `SuperNode Prerequisites`_ before" -#~ " proceeding." +#~ "The model architecture defined in class" +#~ " Net() is added with Batch " +#~ "Normalization layers accordingly." #~ msgstr "" -#~ msgid "Creating a ServerApp Dockerfile" +#~ msgid "You can now run your machine learning workload:" #~ msgstr "" #~ msgid "" -#~ "First, we need to create a " -#~ "Dockerfile in the directory where the" -#~ " ``ServerApp`` code is located. If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" -#~ "pytorch``." +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the system consists of one " +#~ "server and two clients." #~ msgstr "" -#~ msgid "" -#~ "The ``Dockerfile.serverapp`` contains the " -#~ "instructions that assemble the ServerApp " -#~ "image." +#~ msgid "Federated Training" #~ msgstr "" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the ServerApp " -#~ "image tagged ``1.8.0`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. 
The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. In the last two " -#~ "lines, we copy the ``server.py`` module" -#~ " into the image and set the " -#~ "entry point to ``flower-server-app`` " -#~ "with the argument ``server:app``. The " -#~ "argument is the object reference of " -#~ "the ServerApp (``:``) that " -#~ "will be run inside the ServerApp " -#~ "container." +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only ``get_parameters`` " +#~ "and ``set_parameters`` function in " +#~ "``client.py`` needed to revise. If not," +#~ " please read the :doc:`Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`. first." #~ msgstr "" -#~ msgid "Building the ServerApp Docker image" -#~ msgstr "Construindo a imagem do servidor" - #~ msgid "" -#~ "Next, we build the ServerApp Docker " -#~ "image by running the following command" -#~ " in the directory where Dockerfile " -#~ "and ServerApp code are located." +#~ "Our example consists of one *server* " +#~ "and two *clients*. In FedBN, " +#~ "``server.py`` keeps unchanged, we can " +#~ "start the server directly." #~ msgstr "" #~ msgid "" -#~ "We gave the image the name " -#~ "``flwr_serverapp``, and the tag ``0.0.1``. " -#~ "Remember that the here chosen values " -#~ "only serve as an example. You can" -#~ " change them to your needs." +#~ "Finally, we will revise our *client* " +#~ "logic by changing ``get_parameters`` and " +#~ "``set_parameters`` in ``client.py``, we will" +#~ " exclude batch normalization parameters " +#~ "from model parameter list when sending" +#~ " to or receiving from the server." #~ msgstr "" -#~ msgid "Running the ServerApp Docker image" -#~ msgstr "Construindo a imagem do servidor" - -#~ msgid "Now that we have built the ServerApp image, we can finally run it." 
+#~ msgid "Now, you can now open two additional terminal windows and run" #~ msgstr "" -#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning with FedBN strategy across two" +#~ " clients. Congratulations!" #~ msgstr "" #~ msgid "" -#~ "``--superlink 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ "The full source code for this " +#~ "example can be found `here " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using different subsets of CIFAR-10 " +#~ "on each client? How about adding " +#~ "more clients?" +#~ msgstr "" + +#~ msgid "Example: PyTorch - From Centralized To Federated" #~ msgstr "" #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your ServerApps." +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload. We are using PyTorch to" +#~ " train a Convolutional Neural Network " +#~ "on the CIFAR-10 dataset. First, we " +#~ "introduce this machine learning task " +#~ "with a centralized training approach " +#~ "based on the `Deep Learning with " +#~ "PyTorch " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" #~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "ServerApp binary. 
To see all available" -#~ " flags that the ServerApp supports, " -#~ "run:" +#~ "We begin with a brief description " +#~ "of the centralized CNN training code." +#~ " If you want a more in-depth" +#~ " explanation of what's going on then" +#~ " have a look at the official " +#~ "`PyTorch tutorial " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your ServerApp container." +#~ "Let's create a new file called " +#~ "``cifar.py`` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as ``torch`` and" +#~ " ``torchvision``) need to be imported. " +#~ "You can see that we do not " +#~ "import any package for federated " +#~ "learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--root-certificates`` " -#~ "flags when starting the container." -#~ msgstr "" - -#~ msgid "Advanced Docker options" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in ``class Net()``." #~ msgstr "" -#~ msgid "Run with root user privileges" +#~ msgid "" +#~ "The ``load_data()`` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ "``transform`` normalized the data after " +#~ "loading." #~ msgstr "" #~ msgid "" -#~ "Flower Docker images, by default, run" -#~ " with a non-root user " -#~ "(username/groupname: ``app``, UID/GID: ``49999``)." 
-#~ " Using root user is not recommended" -#~ " unless it is necessary for specific" -#~ " tasks during the build process. " -#~ "Always make sure to run the " -#~ "container as a non-root user in" -#~ " production to maintain security best " -#~ "practices." +#~ "We now need to define the training" +#~ " (function ``train()``) which loops over" +#~ " the training set, measures the loss," +#~ " backpropagates it, and then takes " +#~ "one optimizer step for each batch " +#~ "of training examples." #~ msgstr "" -#~ msgid "**Run a container with root user privileges**" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function ``test()``. The" +#~ " function loops over all test samples" +#~ " and measures the loss of the " +#~ "model based on the test dataset." #~ msgstr "" -#~ msgid "**Run the build process with root user privileges**" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our CNN on CIFAR-10." #~ msgstr "" -#~ msgid "Using a different Flower version" +#~ msgid "" +#~ "So far, this should all look " +#~ "fairly familiar if you've used PyTorch" +#~ " before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." #~ msgstr "" -#~ msgid "Pinning a Docker image to a specific version" +#~ msgid "" +#~ "The simple machine learning project " +#~ "discussed in the previous section trains" +#~ " the model on a single dataset " +#~ "(CIFAR-10), we call this centralized " +#~ "learning. This concept of centralized " +#~ "learning, as shown in the previous " +#~ "section, is probably known to most " +#~ "of you, and many of you have " +#~ "used it previously. 
Normally, if you'd" +#~ " want to run machine learning " +#~ "workloads in a federated fashion, then" +#~ " you'd have to change most of " +#~ "your code and set everything up " +#~ "from scratch. This can be a " +#~ "considerable effort." #~ msgstr "" #~ msgid "" -#~ "It may happen that we update the" -#~ " images behind the tags. Such updates" -#~ " usually include security updates of " -#~ "system dependencies that should not " -#~ "change the functionality of Flower. " -#~ "However, if you want to ensure " -#~ "that you always use the same " -#~ "image, you can specify the hash of" -#~ " the image instead of the tag." +#~ "However, with Flower you can evolve " +#~ "your pre-existing code into a " +#~ "federated learning setup without the " +#~ "need for a major rewrite." #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``superlink:1.8.0`` tag:" +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in ``cifar.py``" +#~ " for the *clients* that are connected" +#~ " to the *server*. The *server* sends" +#~ " model parameters to the clients. The" +#~ " *clients* run the training and " +#~ "update the parameters. The updated " +#~ "parameters are sent back to the " +#~ "*server* which averages all received " +#~ "parameter updates. This describes one " +#~ "round of the federated learning process" +#~ " and we repeat this for multiple " +#~ "rounds." #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ "``server.py`` first. The *server* needs " +#~ "to import the Flower package ``flwr``." +#~ " Next, we use the ``start_server`` " +#~ "function to start a server and " +#~ "tell it to perform three rounds of" +#~ " federated learning." 
#~ msgstr "" -#~ msgid "Setting environment variables" +#~ msgid "We can already start the *server*:" #~ msgstr "" #~ msgid "" -#~ "To set a variable inside a Docker" -#~ " container, you can use the ``-e " -#~ "=`` flag." +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined centralized " +#~ "training in ``cifar.py``. Our *client* " +#~ "needs to import ``flwr``, but also " +#~ "``torch`` to update the parameters on" +#~ " our PyTorch model:" #~ msgstr "" #~ msgid "" -#~ "This approach consists of two seprate" -#~ " phases: clipping of the updates and" -#~ " adding noise to the aggregated " -#~ "model. For the clipping phase, Flower" -#~ " framework has made it possible to" -#~ " decide whether to perform clipping " -#~ "on the server side or the client" -#~ " side." +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``CifarClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. ``CifarClient``" +#~ " needs to implement four methods, two" +#~ " methods for getting/setting model " +#~ "parameters, one method for training the" +#~ " model, and one method for testing" +#~ " the model:" #~ msgstr "" +#~ msgid "``set_parameters``" +#~ msgstr "``SETUPTOOLS_VERSION``" + #~ msgid "" -#~ "The :code:`on_fit_config_fn` can be used " -#~ "to pass arbitrary configuration values " -#~ "from server to client, and poetentially" -#~ " change these values each round, for" -#~ " example, to adjust the learning " -#~ "rate. 
The client will receive the " -#~ "dictionary returned by the " -#~ ":code:`on_fit_config_fn` in its own " -#~ ":code:`client.fit()` function." +#~ "set the model parameters on the " +#~ "local model that are received from " +#~ "the server" #~ msgstr "" #~ msgid "" -#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " -#~ "` | :doc:`🤗 " -#~ "Transformers ` " -#~ "| :doc:`JAX ` |" -#~ " :doc:`Pandas ` " -#~ "| :doc:`fastai `" -#~ " | :doc:`PyTorch Lightning ` | :doc:`scikit-" -#~ "learn ` | " -#~ ":doc:`XGBoost ` |" -#~ " :doc:`Android ` " -#~ "| :doc:`iOS `" +#~ "loop over the list of model " +#~ "parameters received as NumPy ``ndarray``'s " +#~ "(think list of neural network layers)" #~ msgstr "" -#~ msgid "flower-client-app" +#~ msgid "``get_parameters``" #~ msgstr "" -#~ msgid ":py:obj:`flwr.client `\\" +#~ msgid "" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ "``ndarray``'s (which is what " +#~ "``flwr.client.NumPyClient`` expects)" #~ msgstr "" -#~ msgid ":py:obj:`flwr.common `\\" +#~ msgid "``fit``" #~ msgstr "" -#~ msgid ":py:obj:`flwr.server `\\" +#~ msgid "" +#~ "update the parameters of the local " +#~ "model with the parameters received from" +#~ " the server" #~ msgstr "" -#~ msgid ":py:obj:`flwr.simulation `\\" +#~ msgid "train the model on the local training set" #~ msgstr "" -#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgid "get the updated local model weights and return them to the server" #~ msgstr "" -#~ msgid "Run Flower client app." +#~ msgid "``evaluate``" #~ msgstr "" -#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgid "evaluate the updated model on the local test set" #~ msgstr "" -#~ msgid "Run Flower SuperNode." 
+#~ msgid "return the local loss and accuracy to the server" #~ msgstr "" -#~ msgid ":py:obj:`flwr.client.mod `\\" +#~ msgid "" +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``test()`` " +#~ "previously defined in ``cifar.py``. So " +#~ "what we really do here is we " +#~ "tell Flower through our ``NumPyClient`` " +#~ "subclass which of our already defined" +#~ " functions to call for training and" +#~ " evaluation. We included type annotations" +#~ " to give you a better understanding" +#~ " of the data types that get " +#~ "passed around." #~ msgstr "" -#~ msgid ":py:obj:`Context `\\ \\(state\\)" +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ "``CifarClient``, and starts this client. " +#~ "You load your data and model by" +#~ " using ``cifar.py``. Start ``CifarClient`` " +#~ "with the function ``fl.client.start_client()`` " +#~ "by pointing it at the same IP " +#~ "address we used in ``server.py``:" #~ msgstr "" -#~ msgid "State of your run." +#~ msgid "And that's it. You can now open two additional terminal windows and run" #~ msgstr "" -#~ msgid "Metrics record." +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is running before you do " +#~ "so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning across two clients. Congratulations!" 
#~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:class:`int` | " -#~ ":py:class:`float` | :py:class:`str` | " -#~ ":py:class:`bytes` | :py:class:`bool` | " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" +#~ "The full source code for this " +#~ "example: `PyTorch: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is, of course, somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using different subsets of CIFAR-10 " +#~ "on each client? How about adding " +#~ "more clients?" #~ msgstr "" -#~ msgid "Remove all items from R." +#~ msgid "" +#~ "For a full code example that uses" +#~ " both centralized and federated evaluation," +#~ " see the *Advanced TensorFlow Example* " +#~ "(the same approach can be applied " +#~ "to workloads implemented in any other" +#~ " framework): " +#~ "https://github.com/adap/flower/tree/main/examples/advanced-" +#~ "tensorflow" #~ msgstr "" -#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgid "" +#~ "To help you start and manage all" +#~ " of the concurrently executing training " +#~ "runs, Flower offers one additional " +#~ "long-running server-side service called " +#~ "**SuperExec**. When you type ``flwr " +#~ "run`` to start a new training run," +#~ " the ``flwr`` CLI bundles your local" +#~ " project (mainly your ``ServerApp`` and " +#~ "``ClientApp``) and sends it to the " +#~ "**SuperExec**. The **SuperExec** will then " +#~ "take care of starting and managing " +#~ "your ``ServerApp``, which in turn " +#~ "selects SuperNodes to execute your " +#~ "``ClientApp``." 
#~ msgstr "" -#~ msgid "d defaults to None." +#~ msgid "" +#~ "This architecture allows many users to" +#~ " (concurrently) run their projects on " +#~ "the same federation, simply by typing" +#~ " ``flwr run`` on their local " +#~ "developer machine." #~ msgstr "" -#~ msgid "Update R from dict/iterable E and F." +#~ msgid "Flower Deployment Engine with SuperExec" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_ENTER " -#~ "`\\" +#~ msgid "The SuperExec service for managing concurrent training runs in Flower." #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_LEAVE " -#~ "`\\" +#~ msgid "FED Template" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_ENTER " -#~ "`\\" +#~ msgid "Table of Contents" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_LEAVE " -#~ "`\\" +#~ msgid "[Table of Contents](#table-of-contents)" #~ msgstr "" -#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgid "[Summary](#summary)" #~ msgstr "" -#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgid "[Motivation](#motivation)" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`START_DRIVER_ENTER " -#~ "`\\" +#~ msgid "[Goals](#goals)" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`START_DRIVER_LEAVE " -#~ "`\\" +#~ msgid "[Non-Goals](#non-goals)" #~ msgstr "" -#~ msgid "" -#~ "An identifier that can be used " -#~ "when loading a particular data partition" -#~ " for a ClientApp. Making use of " -#~ "this identifier is more relevant when" -#~ " conducting simulations." +#~ msgid "[Proposal](#proposal)" #~ msgstr "" -#~ msgid ":py:obj:`partition_id `\\" +#~ msgid "[Drawbacks](#drawbacks)" #~ msgstr "" -#~ msgid "An identifier telling which data partition a ClientApp should use." 
+#~ msgid "[Alternatives Considered](#alternatives-considered)" #~ msgstr "" -#~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:class:`int` | " -#~ ":py:class:`float` | :py:class:`~typing.List`\\ " -#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " -#~ "[:py:class:`float`]]" +#~ msgid "[Appendix](#appendix)" #~ msgstr "" -#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgid "Summary" #~ msgstr "" -#~ msgid "" -#~ "A dataclass storing named Arrays in " -#~ "order. This means that it holds " -#~ "entries as an OrderedDict[str, Array]. " -#~ "ParametersRecord objects can be viewed " -#~ "as an equivalent to PyTorch's " -#~ "state_dict, but holding serialised tensors " -#~ "instead." +#~ msgid "\\[TODO - sentence 1: summary of the problem\\]" #~ msgstr "" -#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgid "\\[TODO - sentence 2: summary of the solution\\]" #~ msgstr "" -#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgid "Motivation" #~ msgstr "" -#~ msgid "Run Flower server app." +#~ msgid "\\[TODO\\]" #~ msgstr "" -#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgid "Goals" #~ msgstr "" -#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgid "Non-Goals" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`LegacyContext `\\ " -#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgid "Proposal" #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.strategy `\\" +#~ msgid "Drawbacks" #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgid "Alternatives Considered" #~ msgstr "" -#~ msgid "run\\_driver\\_api" +#~ msgid "\\[Alternative 1\\]" #~ msgstr "" -#~ msgid "run\\_fleet\\_api" +#~ msgid "\\[Alternative 2\\]" #~ msgstr "" -#~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg+ configuration " -#~ "to clients and collect their public " -#~ "keys. 
- 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" +#~ msgid "Flower Enhancement Doc" #~ msgstr "" -#~ msgid "key shares." +#~ msgid "[Enhancement Doc Template](#enhancement-doc-template)" #~ msgstr "" -#~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" +#~ msgid "[Metadata](#metadata)" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`start_simulation `\\" -#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgid "[Workflow](#workflow)" #~ msgstr "" -#~ msgid "" -#~ "'A dictionary, e.g {\"\": , " -#~ "\"\": } to configure a " -#~ "backend. Values supported in are" -#~ " those included by " -#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgid "[GitHub Issues](#github-issues)" #~ msgstr "" -#~ msgid "" -#~ "When diabled, only INFO, WARNING and " -#~ "ERROR log messages will be shown. " -#~ "If enabled, DEBUG-level logs will " -#~ "be displayed." +#~ msgid "[Google Docs](#google-docs)" #~ msgstr "" -#~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "`str` argument called `cid`. It should" -#~ " return a single client instance of" -#~ " type Client. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not attempt to carry state " -#~ "over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset, hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." 
+#~ msgid "A Flower Enhancement is a standardized development process to" +#~ msgstr "" + +#~ msgid "provide a common structure for proposing larger changes" #~ msgstr "" -#~ msgid "" -#~ "The total number of clients in " -#~ "this simulation. This must be set " -#~ "if `clients_ids` is not set and " -#~ "vice-versa." +#~ msgid "ensure that the motivation for a change is clear" #~ msgstr "" -#~ msgid "" -#~ "List `client_id`s for each client. This" -#~ " is only required if `num_clients` is" -#~ " not set. Setting both `num_clients` " -#~ "and `clients_ids` with `len(clients_ids)` not" -#~ " equal to `num_clients` generates an " -#~ "error." +#~ msgid "persist project information in a version control system" #~ msgstr "" -#~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Convolutional Neural " -#~ "Network on CIFAR10 using Flower and " -#~ "PyTorch." +#~ msgid "document the motivation for impactful user-facing changes" #~ msgstr "" -#~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight-updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." +#~ msgid "reserve GitHub issues for tracking work in flight" #~ msgstr "" #~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started. We first need to install " -#~ "Flower. 
You can do this by running" -#~ " :" +#~ "ensure community participants can successfully" +#~ " drive changes to completion across " +#~ "one or more releases while stakeholders" +#~ " are adequately represented throughout the" +#~ " process" #~ msgstr "" -#~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead and install PyTorch and " -#~ "the **torchvision** library:" +#~ msgid "Hence, an Enhancement Doc combines aspects of" #~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Deep Learning with" -#~ " PyTorch " -#~ "`_." +#~ msgid "a feature, and effort-tracking document" #~ msgstr "" -#~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and PyTorch related " -#~ "packages:" +#~ msgid "a product requirements document" #~ msgstr "" -#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgid "a design document" #~ msgstr "" #~ msgid "" -#~ "We use PyTorch to load CIFAR10, a" -#~ " popular colored image classification " -#~ "dataset for machine learning. The " -#~ "PyTorch :code:`DataLoader()` downloads the " -#~ "training and test data that are " -#~ "then normalized." +#~ "into one file, which is created " +#~ "incrementally in collaboration with the " +#~ "community." #~ msgstr "" #~ msgid "" -#~ "Define the loss and optimizer with " -#~ "PyTorch. The training of the dataset " -#~ "is done by looping over the " -#~ "dataset, measure the corresponding loss " -#~ "and optimize it." +#~ "For far-fetching changes or features " +#~ "proposed to Flower, an abstraction " +#~ "beyond a single GitHub issue or " +#~ "pull request is required to understand" +#~ " and communicate upcoming changes to " +#~ "the project." 
#~ msgstr "" #~ msgid "" -#~ "Define then the validation of the " -#~ "machine learning network. We loop over" -#~ " the test set and measure the " -#~ "loss and accuracy of the test set." +#~ "The purpose of this process is to" +#~ " reduce the amount of \"tribal " +#~ "knowledge\" in our community. By moving" +#~ " decisions from Slack threads, video " +#~ "calls, and hallway conversations into a" +#~ " well-tracked artifact, this process " +#~ "aims to enhance communication and " +#~ "discoverability." #~ msgstr "" #~ msgid "" -#~ "After defining the training and testing" -#~ " of a PyTorch machine learning model," -#~ " we use the functions for the " -#~ "Flower clients." +#~ "Roughly any larger, user-facing " +#~ "enhancement should follow the Enhancement " +#~ "process. If an enhancement would be " +#~ "described in either written or verbal" +#~ " communication to anyone besides the " +#~ "author or developer, then consider " +#~ "creating an Enhancement Doc." #~ msgstr "" #~ msgid "" -#~ "The Flower clients will use a " -#~ "simple CNN adapted from 'PyTorch: A " -#~ "60 Minute Blitz':" +#~ "Similarly, any technical effort (refactoring," +#~ " major architectural change) that will " +#~ "impact a large section of the " +#~ "development community should also be " +#~ "communicated widely. The Enhancement process" +#~ " is suited for this even if it" +#~ " will have zero impact on the " +#~ "typical user or operator." #~ msgstr "" #~ msgid "" -#~ "After loading the data set with " -#~ ":code:`load_data()` we define the Flower " -#~ "interface." +#~ "For small changes and additions, going" +#~ " through the Enhancement process would " +#~ "be time-consuming and unnecessary. This" +#~ " includes, for example, adding new " +#~ "Federated Learning algorithms, as these " +#~ "only add features without changing how" +#~ " Flower works or is used." 
#~ msgstr "" #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses " -#~ "PyTorch. Implementing :code:`NumPyClient` usually" -#~ " means defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "Enhancements are different from feature " +#~ "requests, as they are already providing" +#~ " a laid-out path for implementation" +#~ " and are championed by members of " +#~ "the community." #~ msgstr "" -#~ msgid "receive the updated local model weights" +#~ msgid "" +#~ "An Enhancement is captured in a " +#~ "Markdown file that follows a defined " +#~ "template and a workflow to review " +#~ "and store enhancement docs for reference" +#~ " — the Enhancement Doc." #~ msgstr "" -#~ msgid "which can be implemented in the following way:" +#~ msgid "Enhancement Doc Template" #~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-" -#~ "pytorch`." +#~ "Each enhancement doc is provided as " +#~ "a Markdown file having the following " +#~ "structure" #~ msgstr "" -#~ msgid "" -#~ "In this example, we split the " -#~ "dataset into two partitions with uniform" -#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " -#~ "Then, we load the partition for " -#~ "the given client based on " -#~ ":code:`node_id`:" +#~ msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" #~ msgstr "" -#~ msgid "" -#~ "The :code:`self.bst` is used to keep " -#~ "the Booster objects that remain " -#~ "consistent across rounds, allowing them " -#~ "to store predictions from trees " -#~ "integrated in earlier rounds and " -#~ "maintain other essential data structures " -#~ "for training." 
+#~ msgid "Title (same as in metadata)" #~ msgstr "" -#~ msgid "" -#~ "In :code:`fit`, at the first round, " -#~ "we call :code:`xgb.train()` to build up" -#~ " the first set of trees. the " -#~ "returned Booster object and config are" -#~ " stored in :code:`self.bst` and " -#~ ":code:`self.config`, respectively. From the " -#~ "second round, we load the global " -#~ "model sent from server to " -#~ ":code:`self.bst`, and then update model " -#~ "weights on local training data with " -#~ "function :code:`local_boost` as follows:" +#~ msgid "Table of Contents (if needed)" +#~ msgstr "" + +#~ msgid "Notes/Constraints/Caveats (optional)" +#~ msgstr "" + +#~ msgid "Design Details (optional)" +#~ msgstr "" + +#~ msgid "Graduation Criteria" +#~ msgstr "" + +#~ msgid "Upgrade/Downgrade Strategy (if applicable)" +#~ msgstr "" + +#~ msgid "As a reference, this document follows the above structure." #~ msgstr "" #~ msgid "" -#~ "Given :code:`num_local_round`, we update trees" -#~ " by calling :code:`self.bst.update` method. " -#~ "After training, the last " -#~ ":code:`N=num_local_round` trees will be " -#~ "extracted to send to the server." +#~ "**fed-number** (Required) The `fed-" +#~ "number` of the last Flower Enhancement" +#~ " Doc + 1. With this number, it" +#~ " becomes easy to reference other " +#~ "proposals." +#~ msgstr "" + +#~ msgid "**title** (Required) The title of the proposal in plain language." #~ msgstr "" #~ msgid "" -#~ "In :code:`evaluate`, we call " -#~ ":code:`self.bst.eval_set` function to conduct " -#~ "evaluation on valid set. The AUC " -#~ "value will be returned." +#~ "**status** (Required) The current status " +#~ "of the proposal. See [workflow](#workflow) " +#~ "for the possible states." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client`and call" -#~ " :code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. 
In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "**authors** (Required) A list of authors" +#~ " of the proposal. This is simply " +#~ "the GitHub ID." #~ msgstr "" #~ msgid "" -#~ "We use two clients for this " -#~ "example. An :code:`evaluate_metrics_aggregation` " -#~ "function is defined to collect and " -#~ "wighted average the AUC values from " -#~ "clients." +#~ "**creation-date** (Required) The date " +#~ "that the proposal was first submitted" +#~ " in a PR." #~ msgstr "" #~ msgid "" -#~ "Welcome to the third part of the" -#~ " Flower federated learning tutorial. In " -#~ "previous parts of this tutorial, we " -#~ "introduced federated learning with PyTorch " -#~ "and Flower (`part 1 " -#~ "`__) and we " -#~ "learned how strategies can be used " -#~ "to customize the execution on both " -#~ "the server and the clients (`part " -#~ "2 `__)." +#~ "**last-updated** (Optional) The date " +#~ "that the proposal was last changed " +#~ "significantly." #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll continue to " -#~ "customize the federated learning system " -#~ "we built previously by creating a " -#~ "custom version of FedAvg (again, using" -#~ " `Flower `__ and `PyTorch " -#~ "`__)." +#~ "**see-also** (Optional) A list of " +#~ "other proposals that are relevant to " +#~ "this one." +#~ msgstr "" + +#~ msgid "**replaces** (Optional) A list of proposals that this one replaces." #~ msgstr "" #~ msgid "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ and join " -#~ "the Flower community on Slack to " -#~ "connect, ask questions, and get help:" -#~ " `Join Slack `__" -#~ " 🌼 We'd love to hear from you" -#~ " in the ``#introductions`` channel! 
And " -#~ "if anything is unclear, head over " -#~ "to the ``#questions`` channel." +#~ "**superseded-by** (Optional) A list of" +#~ " proposals that this one supersedes." #~ msgstr "" -#~ msgid "Let's build a new ``Strategy`` from scratch!" +#~ msgid "Workflow" #~ msgstr "" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap everything in their own " -#~ "``DataLoader``. We introduce a new " -#~ "parameter ``num_clients`` which allows us " -#~ "to call ``load_datasets`` with different " -#~ "numbers of clients." +#~ "The idea forming the enhancement should" +#~ " already have been discussed or " +#~ "pitched in the community. As such, " +#~ "it needs a champion, usually the " +#~ "author, who shepherds the enhancement. " +#~ "This person also has to find " +#~ "committers to Flower willing to review" +#~ " the proposal." #~ msgstr "" #~ msgid "" -#~ "To implement the Flower client, we " -#~ "(again) create a subclass of " -#~ "``flwr.client.NumPyClient`` and implement the " -#~ "three methods ``get_parameters``, ``fit``, and" -#~ " ``evaluate``. Here, we also pass the" -#~ " ``cid`` to the client and use " -#~ "it log additional details:" +#~ "New enhancements are checked in with " +#~ "a file name in the form of " +#~ "`NNNN-YYYYMMDD-enhancement-title.md`, with " +#~ "`NNNN` being the Flower Enhancement Doc" +#~ " number, to `enhancements`. All " +#~ "enhancements start in `provisional` state " +#~ "as part of a pull request. " +#~ "Discussions are done as part of " +#~ "the pull request review." #~ msgstr "" #~ msgid "" -#~ "Let's go deeper and see what it" -#~ " takes to move from ``NumPyClient`` " -#~ "to ``Client``!" +#~ "Once an enhancement has been reviewed" +#~ " and approved, its status is changed" +#~ " to `implementable`. The actual " +#~ "implementation is then done in separate" +#~ " pull requests. 
These pull requests " +#~ "should mention the respective enhancement " +#~ "as part of their description. After " +#~ "the implementation is done, the proposal" +#~ " status is changed to `implemented`." #~ msgstr "" #~ msgid "" -#~ "So far, we've implemented our client " -#~ "by subclassing ``flwr.client.NumPyClient``. The " -#~ "three methods we implemented are " -#~ "``get_parameters``, ``fit``, and ``evaluate``. " -#~ "Finally, we wrap the creation of " -#~ "instances of this class in a " -#~ "function called ``client_fn``:" +#~ "Under certain conditions, other states " +#~ "are possible. An Enhancement has the " +#~ "following states:" #~ msgstr "" #~ msgid "" -#~ "We've seen this before, there's nothing" -#~ " new so far. The only *tiny* " -#~ "difference compared to the previous " -#~ "notebook is naming, we've changed " -#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " -#~ "``client_fn`` to ``numpyclient_fn``. Let's run" -#~ " it to see the output we get:" +#~ "`provisional`: The enhancement has been " +#~ "proposed and is actively being defined." +#~ " This is the starting state while " +#~ "the proposal is being fleshed out " +#~ "and actively defined and discussed." +#~ msgstr "" + +#~ msgid "`implementable`: The enhancement has been reviewed and approved." #~ msgstr "" #~ msgid "" -#~ "This works as expected, two clients " -#~ "are training for three rounds of " -#~ "federated learning." +#~ "`implemented`: The enhancement has been " +#~ "implemented and is no longer actively" +#~ " changed." #~ msgstr "" #~ msgid "" -#~ "Let's dive a little bit deeper and" -#~ " discuss how Flower executes this " -#~ "simulation. Whenever a client is " -#~ "selected to do some work, " -#~ "``start_simulation`` calls the function " -#~ "``numpyclient_fn`` to create an instance " -#~ "of our ``FlowerNumPyClient`` (along with " -#~ "loading the model and the data)." +#~ "`deferred`: The enhancement is proposed " +#~ "but not actively being worked on." 
#~ msgstr "" #~ msgid "" -#~ "`Check out Flower Code Examples " -#~ "`__" +#~ "`rejected`: The authors and reviewers " +#~ "have decided that this enhancement is" +#~ " not moving forward." +#~ msgstr "" + +#~ msgid "`withdrawn`: The authors have withdrawn the enhancement." +#~ msgstr "" + +#~ msgid "`replaced`: The enhancement has been replaced by a new enhancement." #~ msgstr "" #~ msgid "" -#~ "`Watch Flower Summit 2023 videos " -#~ "`__" +#~ "Adding an additional process to the " +#~ "ones already provided by GitHub (Issues" +#~ " and Pull Requests) adds more " +#~ "complexity and can be a barrier " +#~ "for potential first-time contributors." #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower, " -#~ "`Flower Datasets `__ " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." +#~ "Expanding the proposal template beyond " +#~ "the single-sentence description currently " +#~ "required in the features issue template" +#~ " may be a heavy burden for " +#~ "non-native English speakers." +#~ msgstr "" + +#~ msgid "GitHub Issues" +#~ msgstr "" + +#~ msgid "" +#~ "Using GitHub Issues for these kinds " +#~ "of enhancements is doable. One could " +#~ "use, for example, tags, to differentiate" +#~ " and filter them from other issues." +#~ " The main issue is in discussing " +#~ "and reviewing an enhancement: GitHub " +#~ "issues only have a single thread " +#~ "for comments. Enhancements usually have " +#~ "multiple threads of discussion at the" +#~ " same time for various parts of " +#~ "the doc. Managing these multiple " +#~ "discussions can be confusing when using" +#~ " GitHub Issues." 
#~ msgstr "" -#~ msgid "Loading the data" +#~ msgid "Google Docs" #~ msgstr "" #~ msgid "" -#~ "We simulate having multiple datasets " -#~ "from multiple organizations (also called " -#~ "the \"cross-silo\" setting in federated" -#~ " learning) by splitting the original " -#~ "CIFAR-10 dataset into multiple partitions. " -#~ "Each partition will represent the data" -#~ " from a single organization. We're " -#~ "doing this purely for experimentation " -#~ "purposes, in the real world there's " -#~ "no need for data splitting because " -#~ "each organization already has their own" -#~ " data (so the data is naturally " -#~ "partitioned)." +#~ "Google Docs allow for multiple threads" +#~ " of discussions. But as Google Docs" +#~ " are hosted outside the project, " +#~ "their discoverability by the community " +#~ "needs to be taken care of. A " +#~ "list of links to all proposals has" +#~ " to be managed and made available " +#~ "for the community. Compared to shipping" +#~ " proposals as part of Flower's " +#~ "repository, the potential for missing " +#~ "links is much higher." +#~ msgstr "" + +#~ msgid "FED - Flower Enhancement Doc" +#~ msgstr "" + +#~ msgid "Configure clients" #~ msgstr "" #~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server." +#~ "Along with model parameters, Flower can" +#~ " send configuration values to clients. " +#~ "Configuration values can be used for " +#~ "various purposes. They are, for example," +#~ " a popular way to control client-" +#~ "side hyperparameters from the server." #~ msgstr "" #~ msgid "" -#~ "Let's now create the Federated Dataset" -#~ " abstraction that from ``flwr-datasets``" -#~ " that partitions the CIFAR-10. 
We " -#~ "will create small training and test " -#~ "set for each edge device and wrap" -#~ " each of them into a PyTorch " -#~ "``DataLoader``:" +#~ "Configuration values are represented as " +#~ "a dictionary with ``str`` keys and " +#~ "values of type ``bool``, ``bytes``, " +#~ "``double`` (64-bit precision float), ``int``," +#~ " or ``str`` (or equivalent types in" +#~ " different languages). Here is an " +#~ "example of a configuration dictionary in" +#~ " Python:" #~ msgstr "" #~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4000 training examples and" -#~ " 1000 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." +#~ "Flower serializes these configuration " +#~ "dictionaries (or *config dict* for " +#~ "short) to their ProtoBuf representation, " +#~ "transports them to the client using " +#~ "gRPC, and then deserializes them back" +#~ " to Python dictionaries." #~ msgstr "" #~ msgid "" -#~ "Let's take a look at the first " -#~ "batch of images and labels in the" -#~ " first training set (i.e., " -#~ "``trainloaders[0]``) before we move on:" +#~ "Currently, there is no support for " +#~ "directly sending collection types (e.g., " +#~ "``Set``, ``List``, ``Map``) as values in" +#~ " configuration dictionaries. There are " +#~ "several workarounds to send collections " +#~ "as values by converting them to " +#~ "one of the supported value types " +#~ "(and converting them back on the " +#~ "client-side)." 
#~ msgstr "" #~ msgid "" -#~ "The output above shows a random " -#~ "batch of images from the first " -#~ "``trainloader`` in our list of ten " -#~ "``trainloaders``. It also prints the " -#~ "labels associated with each image (i.e.," -#~ " one of the ten possible labels " -#~ "we've seen above). If you run the" -#~ " cell again, you should see another" -#~ " batch of images." +#~ "One can, for example, convert a " +#~ "list of floating-point numbers to " +#~ "a JSON string, then send the JSON" +#~ " string using the configuration dictionary," +#~ " and then convert the JSON string " +#~ "back to a list of floating-point" +#~ " numbers on the client." #~ msgstr "" -#~ msgid "Defining the model" +#~ msgid "Configuration through built-in strategies" #~ msgstr "" -#~ msgid "Training the model" +#~ msgid "" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like ``FedAvg``. " +#~ "Built-in strategies support so-called " +#~ "configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." #~ msgstr "" #~ msgid "" -#~ "We now have all the basic building" -#~ " blocks we need: a dataset, a " -#~ "model, a training function, and a " -#~ "test function. Let's put them together" -#~ " to train the model on the " -#~ "dataset of one of our organizations " -#~ "(``trainloaders[0]``). This simulates the " -#~ "reality of most machine learning " -#~ "projects today: each organization has " -#~ "their own data and trains models " -#~ "only on this internal data:" +#~ "Let's start with a simple example. 
" +#~ "Imagine we want to send (a) the" +#~ " batch size that the client should" +#~ " use, (b) the current global round" +#~ " of federated learning, and (c) the" +#~ " number of epochs to train on " +#~ "the client-side. Our configuration " +#~ "function could look like this:" #~ msgstr "" #~ msgid "" -#~ "Training the simple CNN on our " -#~ "CIFAR-10 split for 5 epochs should " -#~ "result in a test set accuracy of" -#~ " about 41%, which is not good, " -#~ "but at the same time, it doesn't" -#~ " really matter for the purposes of" -#~ " this tutorial. The intent was just" -#~ " to show a simplistic centralized " -#~ "training pipeline that sets the stage" -#~ " for what comes next - federated " -#~ "learning!" +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter ``on_fit_config_fn``:" #~ msgstr "" -#~ msgid "Updating model parameters" +#~ msgid "" +#~ "One the client side, we receive " +#~ "the configuration dictionary in ``fit``:" #~ msgstr "" #~ msgid "" -#~ "In federated learning, the server sends" -#~ " the global model parameters to the" -#~ " client, and the client updates the" -#~ " local model with the parameters " -#~ "received from the server. It then " -#~ "trains the model on the local data" -#~ " (which changes the model parameters " -#~ "locally) and sends the updated/changed " -#~ "model parameters back to the server " -#~ "(or, alternatively, it sends just the" -#~ " gradients back to the server, not" -#~ " the full model parameters)." +#~ "There is also an `on_evaluate_config_fn` " +#~ "to configure evaluation, which works the" +#~ " same way. They are separate " +#~ "functions because one might want to " +#~ "send different configuration values to " +#~ "`evaluate` (for example, to use a " +#~ "different batch size)." 
#~ msgstr "" #~ msgid "" -#~ "The details of how this works are" -#~ " not really important here (feel free" -#~ " to consult the PyTorch documentation " -#~ "if you want to learn more). In " -#~ "essence, we use ``state_dict`` to access" -#~ " PyTorch model parameter tensors. The " -#~ "parameter tensors are then converted " -#~ "to/from a list of NumPy ndarray's " -#~ "(which Flower knows how to " -#~ "serialize/deserialize):" +#~ "The built-in strategies call this " +#~ "function every round (that is, every " +#~ "time `Strategy.configure_fit` or " +#~ "`Strategy.configure_evaluate` runs). Calling " +#~ "`on_evaluate_config_fn` every round allows us" +#~ " to vary/change the config dict over" +#~ " consecutive rounds. If we wanted to" +#~ " implement a hyperparameter schedule, for" +#~ " example, to increase the number of" +#~ " local epochs during later rounds, we" +#~ " could do the following:" #~ msgstr "" -#~ msgid "Implementing a Flower client" +#~ msgid "The ``FedAvg`` strategy will call this function *every round*." #~ msgstr "" -#~ msgid "" -#~ "With that out of the way, let's" -#~ " move on to the interesting part. " -#~ "Federated learning systems consist of a" -#~ " server and multiple clients. In " -#~ "Flower, we create clients by " -#~ "implementing subclasses of ``flwr.client.Client``" -#~ " or ``flwr.client.NumPyClient``. We use " -#~ "``NumPyClient`` in this tutorial because " -#~ "it is easier to implement and " -#~ "requires us to write less boilerplate." +#~ msgid "Configuring individual clients" #~ msgstr "" #~ msgid "" -#~ "To implement the Flower client, we " -#~ "create a subclass of " -#~ "``flwr.client.NumPyClient`` and implement the " -#~ "three methods ``get_parameters``, ``fit``, and" -#~ " ``evaluate``:" +#~ "In some cases, it is necessary to" +#~ " send different configuration values to " +#~ "different clients." 
#~ msgstr "" #~ msgid "" -#~ "``fit``: Receive model parameters from " -#~ "the server, train the model parameters" -#~ " on the local data, and return " -#~ "the (updated) model parameters to the" -#~ " server" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. " +#~ "Here's a nonsensical example that " +#~ "customizes ``FedAvg`` by adding a custom" +#~ " ``\"hello\": \"world\"`` configuration key/value" +#~ " pair to the config dict of a" +#~ " *single client* (only the first " +#~ "client in the list, the other " +#~ "clients in this round to not " +#~ "receive this \"special\" config value):" +#~ msgstr "" + +#~ msgid "Configure logging" #~ msgstr "" #~ msgid "" -#~ "``evaluate``: Receive model parameters from" -#~ " the server, evaluate the model " -#~ "parameters on the local data, and " -#~ "return the evaluation result to the " -#~ "server" +#~ "The Flower logger keeps track of " +#~ "all core events that take place in" +#~ " federated learning workloads. It presents" +#~ " information by default following a " +#~ "standard message format:" #~ msgstr "" #~ msgid "" -#~ "Our class ``FlowerClient`` defines how " -#~ "local training/evaluation will be performed" -#~ " and allows Flower to call the " -#~ "local training/evaluation through ``fit`` and" -#~ " ``evaluate``. Each instance of " -#~ "``FlowerClient`` represents a *single client*" -#~ " in our federated learning system. " -#~ "Federated learning systems have multiple " -#~ "clients (otherwise, there's not much to" -#~ " federate), so each client will be" -#~ " represented by its own instance of" -#~ " ``FlowerClient``. If we have, for " -#~ "example, three clients in our workload," -#~ " then we'd have three instances of" -#~ " ``FlowerClient``. 
Flower calls " -#~ "``FlowerClient.fit`` on the respective " -#~ "instance when the server selects a " -#~ "particular client for training (and " -#~ "``FlowerClient.evaluate`` for evaluation)." +#~ "containing relevant information including: log" +#~ " message level (e.g. ``INFO``, ``DEBUG``)," +#~ " a timestamp, the line where the " +#~ "logging took place from, as well " +#~ "as the log message itself. In this" +#~ " way, the logger would typically " +#~ "display information on your terminal as" +#~ " follows:" #~ msgstr "" -#~ msgid "Using the Virtual Client Engine" +#~ msgid "Saving log to file" #~ msgstr "" #~ msgid "" -#~ "In this notebook, we want to " -#~ "simulate a federated learning system " -#~ "with 10 clients on a single " -#~ "machine. This means that the server " -#~ "and all 10 clients will live on" -#~ " a single machine and share resources" -#~ " such as CPU, GPU, and memory. " -#~ "Having 10 clients would mean having " -#~ "10 instances of ``FlowerClient`` in " -#~ "memory. Doing this on a single " -#~ "machine can quickly exhaust the " -#~ "available memory resources, even if only" -#~ " a subset of these clients " -#~ "participates in a single round of " -#~ "federated learning." +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ "``fl.server.start_server``) and when using the" +#~ " ``VirtualClientEngine`` (i.e. when you do" +#~ " ``fl.simulation.start_simulation``). In some " +#~ "situations you might want to save " +#~ "this log to disk. You can do " +#~ "so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. 
For example:" #~ msgstr "" #~ msgid "" -#~ "In addition to the regular capabilities" -#~ " where server and clients run on " -#~ "multiple machines, Flower, therefore, provides" -#~ " special simulation capabilities that " -#~ "create ``FlowerClient`` instances only when" -#~ " they are actually necessary for " -#~ "training or evaluation. To enable the" -#~ " Flower framework to create clients " -#~ "when necessary, we need to implement " -#~ "a function called ``client_fn`` that " -#~ "creates a ``FlowerClient`` instance on " -#~ "demand. Flower calls ``client_fn`` whenever" -#~ " it needs an instance of one " -#~ "particular client to call ``fit`` or " -#~ "``evaluate`` (those instances are usually " -#~ "discarded after use, so they should " -#~ "not keep any local state). Clients " -#~ "are identified by a client ID, or" -#~ " short ``cid``. The ``cid`` can be" -#~ " used, for example, to load different" -#~ " local data partitions for different " -#~ "clients, as can be seen below:" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to ``log.txt``. This file will be" +#~ " created in the same directory as " +#~ "were you are running the code " +#~ "from. If we inspect we see the " +#~ "log above is also recorded but " +#~ "prefixing with ``identifier`` each line:" #~ msgstr "" -#~ msgid "Starting the training" +#~ msgid "Log your own messages" #~ msgstr "" #~ msgid "" -#~ "We now have the class ``FlowerClient``" -#~ " which defines client-side " -#~ "training/evaluation and ``client_fn`` which " -#~ "allows Flower to create ``FlowerClient`` " -#~ "instances whenever it needs to call " -#~ "``fit`` or ``evaluate`` on one " -#~ "particular client. The last step is " -#~ "to start the actual simulation using " -#~ "``flwr.simulation.start_simulation``." +#~ "You might expand the information shown" +#~ " by default with the Flower logger" +#~ " by adding more messages relevant to" +#~ " your application. 
You can achieve " +#~ "this easily as follows." #~ msgstr "" #~ msgid "" -#~ "The function ``start_simulation`` accepts a" -#~ " number of arguments, amongst them " -#~ "the ``client_fn`` used to create " -#~ "``FlowerClient`` instances, the number of " -#~ "clients to simulate (``num_clients``), the " -#~ "number of federated learning rounds " -#~ "(``num_rounds``), and the strategy. The " -#~ "strategy encapsulates the federated learning" -#~ " approach/algorithm, for example, *Federated " -#~ "Averaging* (FedAvg)." +#~ "In this way your logger will show," +#~ " in addition to the default messages," +#~ " the ones introduced by the clients" +#~ " as specified above." +#~ msgstr "" + +#~ msgid "Log to a remote service" #~ msgstr "" #~ msgid "" -#~ "Flower has a number of built-in" -#~ " strategies, but we can also use " -#~ "our own strategy implementations to " -#~ "customize nearly all aspects of the " -#~ "federated learning approach. For this " -#~ "example, we use the built-in " -#~ "``FedAvg`` implementation and customize it " -#~ "using a few basic parameters. The " -#~ "last step is the actual call to" -#~ " ``start_simulation`` which - you guessed" -#~ " it - starts the simulation:" +#~ "The ``fl.common.logger.configure`` function, also" +#~ " allows specifying a host to which" +#~ " logs can be pushed (via ``POST``)" +#~ " through a native Python " +#~ "``logging.handler.HTTPHandler``. This is a " +#~ "particularly useful feature in ``gRPC``-based" +#~ " Federated Learning workloads where " +#~ "otherwise gathering logs from all " +#~ "entities (i.e. the server and the " +#~ "clients) might be cumbersome. Note that" +#~ " in Flower simulation, the server " +#~ "automatically displays all logs. You can" +#~ " still specify a ``HTTPHandler`` should " +#~ "you wish to backup or analyze the" +#~ " logs somewhere else." 
+#~ msgstr "" + +#~ msgid "Monitor simulation" #~ msgstr "" #~ msgid "" -#~ "When we call ``start_simulation``, we " -#~ "tell Flower that there are 10 " -#~ "clients (``num_clients=10``). Flower then goes" -#~ " ahead an asks the ``FedAvg`` " -#~ "strategy to select clients. ``FedAvg`` " -#~ "knows that it should select 100% " -#~ "of the available clients " -#~ "(``fraction_fit=1.0``), so it goes ahead " -#~ "and selects 10 random clients (i.e., " -#~ "100% of 10)." +#~ "Flower allows you to monitor system " +#~ "resources while running your simulation. " +#~ "Moreover, the Flower simulation engine " +#~ "is powerful and enables you to " +#~ "decide how to allocate resources per " +#~ "client manner and constrain the total" +#~ " usage. Insights from resource consumption" +#~ " can help you make smarter decisions" +#~ " and speed up the execution time." #~ msgstr "" #~ msgid "" -#~ "Flower then asks the selected 10 " -#~ "clients to train the model. When " -#~ "the server receives the model parameter" -#~ " updates from the clients, it hands" -#~ " those updates over to the strategy" -#~ " (*FedAvg*) for aggregation. The strategy" -#~ " aggregates those updates and returns " -#~ "the new global model, which then " -#~ "gets used in the next round of " -#~ "federated learning." +#~ "The specific instructions assume you are" +#~ " using macOS and have the `Homebrew" +#~ " `_ package manager installed." +#~ msgstr "" + +#~ msgid "Downloads" #~ msgstr "" #~ msgid "" -#~ "The only thing left to do is " -#~ "to tell the strategy to call this" -#~ " function whenever it receives evaluation" -#~ " metric dictionaries from the clients:" +#~ "`Prometheus `_ is used " +#~ "for data collection, while `Grafana " +#~ "`_ will enable you to" +#~ " visualize the collected data. They " +#~ "are both well integrated with `Ray " +#~ "`_ which Flower uses " +#~ "under the hood." 
#~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll begin to " -#~ "customize the federated learning system " -#~ "we built in the introductory notebook" -#~ " (again, using `Flower `__" -#~ " and `PyTorch `__)." +#~ "Overwrite the configuration files (depending" +#~ " on your device, it might be " +#~ "installed on a different path)." #~ msgstr "" -#~ msgid "Let's move beyond FedAvg with Flower strategies!" +#~ msgid "If you are on an M1 Mac, it should be:" #~ msgstr "" -#~ msgid "" -#~ "Flower, by default, initializes the " -#~ "global model by asking one random " -#~ "client for the initial parameters. In" -#~ " many cases, we want more control " -#~ "over parameter initialization though. Flower" -#~ " therefore allows you to directly " -#~ "pass the initial parameters to the " -#~ "Strategy:" +#~ msgid "On the previous generation Intel Mac devices, it should be:" #~ msgstr "" #~ msgid "" -#~ "Passing ``initial_parameters`` to the " -#~ "``FedAvg`` strategy prevents Flower from " -#~ "asking one of the clients for the" -#~ " initial parameters. If we look " -#~ "closely, we can see that the logs" -#~ " do not show any calls to the" -#~ " ``FlowerClient.get_parameters`` method." +#~ "Open the respective configuration files " +#~ "and change them. Depending on your " +#~ "device, use one of the two " +#~ "following commands:" #~ msgstr "" #~ msgid "" -#~ "We've seen the function ``start_simulation``" -#~ " before. It accepts a number of " -#~ "arguments, amongst them the ``client_fn`` " -#~ "used to create ``FlowerClient`` instances, " -#~ "the number of clients to simulate " -#~ "``num_clients``, the number of rounds " -#~ "``num_rounds``, and the strategy." +#~ "and then delete all the text in" +#~ " the file and paste a new " +#~ "Prometheus config you see below. 
You " +#~ "may adjust the time intervals to " +#~ "your requirements:" #~ msgstr "" #~ msgid "" -#~ "Next, we'll just pass this function " -#~ "to the FedAvg strategy before starting" -#~ " the simulation:" +#~ "Now after you have edited the " +#~ "Prometheus configuration, do the same " +#~ "with the Grafana configuration files. " +#~ "Open those using one of the " +#~ "following commands as before:" #~ msgstr "" #~ msgid "" -#~ "We now have 1000 partitions, each " -#~ "holding 45 training and 5 validation " -#~ "examples. Given that the number of " -#~ "training examples on each client is " -#~ "quite small, we should probably train" -#~ " the model a bit longer, so we" -#~ " configure the clients to perform 3" -#~ " local training epochs. We should " -#~ "also adjust the fraction of clients " -#~ "selected for training during each round" -#~ " (we don't want all 1000 clients " -#~ "participating in every round), so we " -#~ "adjust ``fraction_fit`` to ``0.05``, which " -#~ "means that only 5% of available " -#~ "clients (so 50 clients) will be " -#~ "selected for training each round:" +#~ "Your terminal editor should open and " +#~ "allow you to apply the following " +#~ "configuration as before." #~ msgstr "" -#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgid "" +#~ "Congratulations, you just downloaded all " +#~ "the necessary software needed for " +#~ "metrics tracking. Now, let’s start it." #~ msgstr "" -#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgid "Tracking metrics" #~ msgstr "" -#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgid "" +#~ "Before running your Flower simulation, " +#~ "you have to start the monitoring " +#~ "tools you have just installed and " +#~ "configured." #~ msgstr "" -#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgid "" +#~ "Please include the following argument in" +#~ " your Python code when starting a " +#~ "simulation." 
#~ msgstr "" -#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgid "Now, you are ready to start your workload." #~ msgstr "" -#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgid "" +#~ "Shortly after the simulation starts, you" +#~ " should see the following logs in " +#~ "your terminal:" #~ msgstr "" -#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgid "You can look at everything at http://127.0.0.1:8265 ." #~ msgstr "" -#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgid "" +#~ "It's a Ray Dashboard. You can " +#~ "navigate to Metrics (on the left " +#~ "panel, the lowest option)." #~ msgstr "" -#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgid "" +#~ "Or alternatively, you can just see " +#~ "them in Grafana by clicking on the" +#~ " right-up corner, “View in Grafana”." +#~ " Please note that the Ray dashboard" +#~ " is only accessible during the " +#~ "simulation. After the simulation ends, " +#~ "you can only use Grafana to " +#~ "explore the metrics. You can start " +#~ "Grafana by going to " +#~ "``http://localhost:3000/``." #~ msgstr "" -#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgid "" +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port ``3000`` on your" +#~ " machine as long as they are " +#~ "running." #~ msgstr "" -#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgid "Resource allocation" #~ msgstr "" -#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgid "" +#~ "You must understand how the Ray " +#~ "library works to efficiently allocate " +#~ "system resources to simulation clients " +#~ "on your own." #~ msgstr "" -#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgid "" +#~ "Initially, the simulation (which Ray " +#~ "handles under the hood) starts by " +#~ "default with all the available resources" +#~ " on the system, which it shares " +#~ "among the clients. 
It doesn't mean " +#~ "it divides it equally among all of" +#~ " them, nor that the model training" +#~ " happens at all of them " +#~ "simultaneously. You will learn more " +#~ "about that in the later part of" +#~ " this blog. You can check the " +#~ "system resources by running the " +#~ "following:" #~ msgstr "" -#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "In Google Colab, the result you see might be similar to this:" #~ msgstr "" -#~ msgid ":py:obj:`client `\\" +#~ msgid "" +#~ "However, you can overwrite the defaults." +#~ " When starting a simulation, do the" +#~ " following (you don't need to " +#~ "overwrite all of them):" #~ msgstr "" -#~ msgid ":py:obj:`common `\\" +#~ msgid "Let’s also specify the resource for a single client." #~ msgstr "" -#~ msgid ":py:obj:`server `\\" +#~ msgid "" +#~ "Now comes the crucial part. Ray " +#~ "will start a new client only when" +#~ " it has all the required resources" +#~ " (such that they run in parallel) " +#~ "when the resources allow." #~ msgstr "" -#~ msgid ":py:obj:`simulation `\\" +#~ msgid "" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ "``client_num_gpus = 0.5`` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ "``client_num_gpus = 2``, the simulation " +#~ "wouldn't start (even if you had 2" +#~ " GPUs but decided to set 1 in" +#~ " ``ray_init_args``)." #~ msgstr "" -#~ msgid ":py:obj:`mod `\\" +#~ msgid "Q: I don't see any metrics logged." #~ msgstr "" -#~ msgid "run\\_client\\_app" +#~ msgid "" +#~ "A: The timeframe might not be " +#~ "properly set. The setting is in " +#~ "the top right corner (\"Last 30 " +#~ "minutes\" by default). Please change the" +#~ " timeframe to reflect the period when" +#~ " the simulation was running." 
#~ msgstr "" -#~ msgid "run\\_supernode" +#~ msgid "" +#~ "Q: I see “Grafana server not " +#~ "detected. Please make sure the Grafana" +#~ " server is running and refresh this" +#~ " page” after going to the Metrics " +#~ "tab in Ray Dashboard." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get `\\ " -#~ "\\(key\\[\\, default\\]\\)" +#~ "A: You probably don't have Grafana " +#~ "running. Please check the running " +#~ "services" #~ msgstr "" -#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgid "" +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to http://127.0.0.1:8265." #~ msgstr "" #~ msgid "" -#~ "When there isn't an exact match, " -#~ "all the existing keys in the " -#~ "layout map will be treated as a" -#~ " regex and map against the input " -#~ "key again. The first match will be" -#~ " returned, based on the key insertion" -#~ " order. Return None if there isn't" -#~ " any match found." +#~ "A: Either the simulation has already " +#~ "finished, or you still need to " +#~ "start Prometheus." #~ msgstr "" -#~ msgid "the string key as the query for the layout." +#~ msgid "Resources" #~ msgstr "" -#~ msgid "Corresponding layout based on the query." 
+#~ msgid "" +#~ "Ray Dashboard: https://docs.ray.io/en/latest/ray-" +#~ "observability/getting-started.html" +#~ msgstr "" + +#~ msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get `\\ " -#~ "\\(key\\[\\, default\\]\\)" +#~ "Simulating Federated Learning workloads is " +#~ "useful for a multitude of use-" +#~ "cases: you might want to run your" +#~ " workload on a large cohort of " +#~ "clients but without having to source," +#~ " configure and mange a large number" +#~ " of physical devices; you might want" +#~ " to run your FL workloads as " +#~ "fast as possible on the compute " +#~ "systems you have access to without " +#~ "having to go through a complex " +#~ "setup process; you might want to " +#~ "validate your algorithm on different " +#~ "scenarios at varying levels of data " +#~ "and system heterogeneity, client availability," +#~ " privacy budgets, etc. These are " +#~ "among some of the use-cases where" +#~ " simulating FL workloads makes sense. " +#~ "Flower can accommodate these scenarios " +#~ "by means of its `VirtualClientEngine " +#~ "`_ or VCE." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get `\\ " -#~ "\\(key\\[\\, default\\]\\)" +#~ "The ``VirtualClientEngine`` schedules, launches " +#~ "and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the ``VirtualClientEngine`` are:" #~ msgstr "" -#~ msgid ":py:obj:`strategy `\\" +#~ msgid "" +#~ "resource-aware: this means that each " +#~ "client gets assigned a portion of " +#~ "the compute and memory on your " +#~ "system. 
You as a user can control" +#~ " this at the beginning of the " +#~ "simulation and allows you to control " +#~ "the degree of parallelism of your " +#~ "Flower FL simulation. The fewer the " +#~ "resources per client, the more clients" +#~ " can run concurrently on the same " +#~ "hardware." #~ msgstr "" -#~ msgid ":py:obj:`workflow `\\" +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to ``VirtualClientEngine``'s " +#~ "internals." #~ msgstr "" -#~ msgid "run\\_server\\_app" +#~ msgid "" +#~ "ephemeral: this means that a client " +#~ "is only materialized when it is " +#~ "required in the FL process (e.g. " +#~ "to do `fit() `_). The object is" +#~ " destroyed afterwards, releasing the " +#~ "resources it was assigned and allowing" +#~ " in this way other clients to " +#~ "participate." #~ msgstr "" -#~ msgid "run\\_superlink" +#~ msgid "" +#~ "The ``VirtualClientEngine`` implements `virtual` " +#~ "clients using `Ray `_, an" +#~ " open-source framework for scalable " +#~ "Python workloads. In particular, Flower's " +#~ "``VirtualClientEngine`` makes use of `Actors" +#~ " `_ " +#~ "to spawn `virtual` clients and run " +#~ "their workload." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_simulation `\\" -#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" +#~ "Running Flower simulations still require " +#~ "you to define your client class, a" +#~ " strategy, and utility functions to " +#~ "download and load (and potentially " +#~ "partition) your dataset. With that out" +#~ " of the way, launching your " +#~ "simulation is done with `start_simulation " +#~ "`_ " +#~ "and a minimal example looks as " +#~ "follows:" #~ msgstr "" -#~ msgid "Start a Ray-based Flower simulation server." +#~ msgid "VirtualClientEngine resources" #~ msgstr "" #~ msgid "" -#~ "A function creating `Client` instances. " -#~ "The function must have the signature " -#~ "`client_fn(context: Context). 
It should return" -#~ " a single client instance of type " -#~ "`Client`. Note that the created client" -#~ " instances are ephemeral and will " -#~ "often be destroyed after a single " -#~ "method invocation. Since client instances " -#~ "are not long-lived, they should " -#~ "not attempt to carry state over " -#~ "method invocations. Any state required " -#~ "by the instance (model, dataset, " -#~ "hyperparameters, ...) should be (re-)created" -#~ " in either the call to `client_fn`" -#~ " or the call to any of the " -#~ "client methods (e.g., load evaluation " -#~ "data in the `evaluate` method itself)." +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the ``ray_init_args`` input argument " +#~ "to ``start_simulation`` which the VCE " +#~ "internally passes to Ray's ``ray.init`` " +#~ "command. For a complete list of " +#~ "settings you can configure check the " +#~ "`ray.init `_ documentation. " +#~ "Do not set ``ray_init_args`` if you " +#~ "want the VCE to use all your " +#~ "system's CPUs and GPUs." #~ msgstr "" -#~ msgid "The total number of clients in this simulation." +#~ msgid "Assigning client resources" #~ msgstr "" #~ msgid "" -#~ "UNSUPPORTED, WILL BE REMOVED. USE " -#~ "`num_clients` INSTEAD. List `client_id`s for" -#~ " each client. This is only required" -#~ " if `num_clients` is not set. Setting" -#~ " both `num_clients` and `clients_ids` with" -#~ " `len(clients_ids)` not equal to " -#~ "`num_clients` generates an error. Using " -#~ "this argument will raise an error." +#~ "By default the ``VirtualClientEngine`` assigns" +#~ " a single CPU core (and nothing " +#~ "else) to each virtual client. 
This " +#~ "means that if your system has 10" +#~ " cores, that many virtual clients can" +#~ " be concurrently running." #~ msgstr "" #~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. To understand the GPU" -#~ " utilization caused by `num_gpus`, as " -#~ "well as using custom resources, please" -#~ " consult the Ray documentation." +#~ "More often than not, you would " +#~ "probably like to adjust the resources" +#~ " your clients get assigned based on" +#~ " the complexity (i.e. compute and " +#~ "memory footprint) of your FL workload." +#~ " You can do so when starting " +#~ "your simulation by setting the argument" +#~ " `client_resources` to `start_simulation `_. Two " +#~ "keys are internally used by Ray to" +#~ " schedule and spawn workloads (in our" +#~ " case Flower clients):" +#~ msgstr "" + +#~ msgid "``num_cpus`` indicates the number of CPU cores a client would get." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Server`. If no instance" -#~ " is provided, then `start_server` will " -#~ "create one." +#~ "``num_gpus`` indicates the **ratio** of " +#~ "GPU memory a client gets assigned." +#~ msgstr "" + +#~ msgid "Let's see a few examples:" #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ "While the ``client_resources`` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. 
The " +#~ "``VirtualClientEngine`` will schedule 100 jobs" +#~ " to run (each simulating a client " +#~ "sampled by the strategy) and then " +#~ "will execute them in a resource-" +#~ "aware manner in batches of 8." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_simulation` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ "To understand all the intricate details" +#~ " on how resources are used to " +#~ "schedule FL clients and how to " +#~ "define custom resources, please take a" +#~ " look at the `Ray documentation " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } An empty dictionary can " -#~ "be used (ray_init_args={}) to prevent " -#~ "any arguments from being passed to " -#~ "ray.init." +#~ "A few ready-to-run complete " +#~ "examples for Flower simulation in " +#~ "Tensorflow/Keras and PyTorch are provided " +#~ "in the `Flower repository " +#~ "`_. You can run " +#~ "them on Google Colab too:" #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args:" +#~ "`Tensorflow/Keras Simulation " +#~ "`_: 100 clients collaboratively " +#~ "train a MLP model on MNIST." +#~ msgstr "" + +#~ msgid "" +#~ "`PyTorch Simulation " +#~ "`_: 100 clients collaboratively train" +#~ " a CNN model on MNIST." +#~ msgstr "" + +#~ msgid "" +#~ "Flower's ``VirtualClientEngine`` allows you to" +#~ " run FL simulations across multiple " +#~ "compute nodes. 
Before starting your " +#~ "multi-node simulation ensure that you:" #~ msgstr "" -#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgid "Have the same Python environment in all nodes." #~ msgstr "" -#~ msgid "" -#~ "An empty dictionary can be used " -#~ "(ray_init_args={}) to prevent any arguments" -#~ " from being passed to ray.init." +#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." #~ msgstr "" #~ msgid "" -#~ "Set to True to prevent `ray.shutdown()`" -#~ " in case `ray.is_initialized()=True`." +#~ "Have a copy of your dataset in " +#~ "all nodes (more about this in " +#~ ":ref:`simulation considerations `)" #~ msgstr "" #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "executing a ClientApp wrapping input " -#~ "argument `client_fn`." +#~ "Pass ``ray_init_args={\"address\"=\"auto\"}`` to " +#~ "`start_simulation `_ so the " +#~ "``VirtualClientEngine`` attaches to a running" +#~ " Ray instance." #~ msgstr "" #~ msgid "" -#~ "If you want to create your own " -#~ "Actor classes, you might need to " -#~ "pass some input argument. You can " -#~ "use this dictionary for such purpose." +#~ "Start Ray on you head node: on " +#~ "the terminal type ``ray start --head``." +#~ " This command will print a few " +#~ "lines, one of which indicates how " +#~ "to attach other nodes to the head" +#~ " node." #~ msgstr "" #~ msgid "" -#~ "(default: \"DEFAULT\") Optional string " -#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" -#~ " to choose in which node the " -#~ "actor is placed. If you are an " -#~ "advanced user needed more control you" -#~ " can use lower-level scheduling " -#~ "strategies to pin actors to specific " -#~ "compute nodes (e.g. via " -#~ "NodeAffinitySchedulingStrategy). Please note this" -#~ " is an advanced feature. 
For all " -#~ "details, please refer to the Ray " -#~ "documentation: https://docs.ray.io/en/latest/ray-" -#~ "core/scheduling/index.html" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example ``ray start --address='192.168.1.132:6379'``" #~ msgstr "" -#~ msgid "**hist** -- Object containing metrics from training." +#~ msgid "" +#~ "With all the above done, you can" +#~ " run your code from the head " +#~ "node as you would if the " +#~ "simulation was running on a single " +#~ "node." #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with FastAI to train a vision " -#~ "model on CIFAR-10." +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " ``ray stop`` in each node's terminal" +#~ " (including the head node)." #~ msgstr "" -#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgid "Multi-node simulation good-to-know" #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn more." +#~ "Here we list a few interesting " +#~ "functionality when running multi-node FL" +#~ " simulations:" #~ msgstr "" #~ msgid "" -#~ "Check out this Federating Learning " -#~ "quickstart tutorial for using Flower " -#~ "with HuggingFace Transformers in order " -#~ "to fine-tune an LLM." +#~ "User ``ray status`` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the ``VirtualClientEngine``." #~ msgstr "" #~ msgid "" -#~ "Let's build a federated learning system" -#~ " using Hugging Face Transformers and " -#~ "Flower!" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. 
This means that" +#~ " the ``VirtualClientEngine`` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any ``ray " +#~ "start`` command (including when starting " +#~ "the head)" #~ msgstr "" -#~ msgid "" -#~ "We will leverage Hugging Face to " -#~ "federate the training of language models" -#~ " over multiple clients using Flower. " -#~ "More specifically, we will fine-tune " -#~ "a pre-trained Transformer model " -#~ "(distilBERT) for sequence classification over" -#~ " a dataset of IMDB ratings. The " -#~ "end goal is to detect if a " -#~ "movie rating is positive or negative." +#~ msgid "Considerations for simulations" #~ msgstr "" -#~ msgid "Dependencies" +#~ msgid "" +#~ "We are actively working on these " +#~ "fronts so to make it trivial to" +#~ " run any FL workload with Flower " +#~ "simulation." #~ msgstr "" #~ msgid "" -#~ "To follow along this tutorial you " -#~ "will need to install the following " -#~ "packages: :code:`datasets`, :code:`evaluate`, " -#~ ":code:`flwr`, :code:`torch`, and " -#~ ":code:`transformers`. This can be done " -#~ "using :code:`pip`:" +#~ "The current VCE allows you to run" +#~ " Federated Learning workloads in simulation" +#~ " mode whether you are prototyping " +#~ "simple scenarios on your personal laptop" +#~ " or you want to train a complex" +#~ " FL pipeline across multiple high-" +#~ "performance GPU nodes. While we add " +#~ "more capabilities to the VCE, the " +#~ "points below highlight some of the " +#~ "considerations to keep in mind when " +#~ "designing your FL pipeline with Flower." +#~ " We also highlight a couple of " +#~ "current limitations in our implementation." 
#~ msgstr "" -#~ msgid "Standard Hugging Face workflow" +#~ msgid "GPU resources" #~ msgstr "" -#~ msgid "Handling the data" +#~ msgid "" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key ``num_gpus`` in ``client_resources``." +#~ " This being said, Ray (used " +#~ "internally by the VCE) is by " +#~ "default:" #~ msgstr "" #~ msgid "" -#~ "To fetch the IMDB dataset, we will" -#~ " use Hugging Face's :code:`datasets` " -#~ "library. We then need to tokenize " -#~ "the data and create :code:`PyTorch` " -#~ "dataloaders, this is all done in " -#~ "the :code:`load_data` function:" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set ``num_gpus=0.5`` and " +#~ "you have two GPUs in your system" +#~ " with different (e.g. 32GB and 8GB)" +#~ " VRAM amounts, they both would run" +#~ " 2 clients concurrently." #~ msgstr "" -#~ msgid "Training and testing the model" +#~ msgid "" +#~ "not aware of other unrelated (i.e. " +#~ "not created by the VCE) workloads " +#~ "are running on the GPU. Two " +#~ "takeaways from this are:" #~ msgstr "" #~ msgid "" -#~ "Once we have a way of creating " -#~ "our trainloader and testloader, we can" -#~ " take care of the training and " -#~ "testing. This is very similar to " -#~ "any :code:`PyTorch` training or testing " -#~ "loop:" +#~ "Your Flower server might need a " +#~ "GPU to evaluate the `global model` " +#~ "after aggregation (by instance when " +#~ "making use of the `evaluate method " +#~ "`_)" #~ msgstr "" -#~ msgid "Creating the model itself" +#~ msgid "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ "``CUDA_VISIBLE_DEVICES=\"\"`` when launching " +#~ "your experiment." 
#~ msgstr "" #~ msgid "" -#~ "To create the model itself, we " -#~ "will just load the pre-trained " -#~ "distillBERT model using Hugging Face’s " -#~ ":code:`AutoModelForSequenceClassification` :" +#~ "In addition, the GPU resource limits " +#~ "passed to ``client_resources`` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." #~ msgstr "" -#~ msgid "Federating the example" +#~ msgid "TensorFlow with GPUs" #~ msgstr "" -#~ msgid "Creating the IMDBClient" +#~ msgid "" +#~ "When `using a GPU with TensorFlow " +#~ "`_ nearly your " +#~ "entire GPU memory of all your GPUs" +#~ " visible to the process will be " +#~ "mapped. This is done by TensorFlow " +#~ "for optimization purposes. However, in " +#~ "settings such as FL simulations where" +#~ " we want to split the GPU into" +#~ " multiple `virtual` clients, this is " +#~ "not a desirable mechanism. Luckily we" +#~ " can disable this default behavior by" +#~ " `enabling memory growth " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "To federate our example to multiple " -#~ "clients, we first need to write " -#~ "our Flower client class (inheriting from" -#~ " :code:`flwr.client.NumPyClient`). This is very" -#~ " easy, as our model is a " -#~ "standard :code:`PyTorch` model:" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of ``actor_kwargs`` we can pass" +#~ " the reserved key `\"on_actor_init_fn\"` in" +#~ " order to specify a function to " +#~ "be executed upon actor initialization. " +#~ "In this case, to enable GPU growth" +#~ " for TF workloads. It would look " +#~ "as follows:" #~ msgstr "" #~ msgid "" -#~ "The :code:`get_parameters` function lets the" -#~ " server get the client's parameters. 
" -#~ "Inversely, the :code:`set_parameters` function " -#~ "allows the server to send its " -#~ "parameters to the client. Finally, the" -#~ " :code:`fit` function trains the model " -#~ "locally for the client, and the " -#~ ":code:`evaluate` function tests the model " -#~ "locally and returns the relevant " -#~ "metrics." +#~ "This is precisely the mechanism used " +#~ "in `Tensorflow/Keras Simulation " +#~ "`_ example." #~ msgstr "" -#~ msgid "Starting the server" +#~ msgid "Multi-node setups" #~ msgstr "" #~ msgid "" -#~ "Now that we have a way to " -#~ "instantiate clients, we need to create" -#~ " our server in order to aggregate " -#~ "the results. Using Flower, this can " -#~ "be done very easily by first " -#~ "choosing a strategy (here, we are " -#~ "using :code:`FedAvg`, which will define " -#~ "the global weights as the average " -#~ "of all the clients' weights at " -#~ "each round) and then using the " -#~ ":code:`flwr.server.start_server` function:" +#~ "The VCE does not currently offer a" +#~ " way to control on which node a" +#~ " particular `virtual` client is executed." +#~ " In other words, if more than a" +#~ " single node have the resources " +#~ "needed by a client to run, then" +#~ " any of those nodes could get " +#~ "the client workload scheduled onto. " +#~ "Later in the FL process (i.e. in" +#~ " a different round) the same client" +#~ " could be executed by a different " +#~ "node. Depending on how your clients " +#~ "access their datasets, this might " +#~ "require either having a copy of " +#~ "all dataset partitions on all nodes " +#~ "or a dataset serving mechanism (e.g. " +#~ "using nfs, a database) to circumvent " +#~ "data duplication." #~ msgstr "" #~ msgid "" -#~ "The :code:`weighted_average` function is there" -#~ " to provide a way to aggregate " -#~ "the metrics distributed amongst the " -#~ "clients (basically this allows us to " -#~ "display a nice average accuracy and " -#~ "loss for every round)." 
+#~ "By definition virtual clients are " +#~ "`stateless` due to their ephemeral " +#~ "nature. A client state can be " +#~ "implemented as part of the Flower " +#~ "client class but users need to " +#~ "ensure this saved to persistent storage" +#~ " (e.g. a database, disk) and that " +#~ "can be retrieve later by the same" +#~ " client regardless on which node it" +#~ " is running from. This is related " +#~ "to the point above also since, in" +#~ " some way, the client's dataset could" +#~ " be seen as a type of `state`." #~ msgstr "" -#~ msgid "Putting everything together" +#~ msgid "Save and load model checkpoints" #~ msgstr "" -#~ msgid "We can now start client instances using:" +#~ msgid "Model checkpointing" #~ msgstr "" -#~ msgid "" -#~ "And they will be able to connect" -#~ " to the server and start the " -#~ "federated training." +#~ msgid "Save and load PyTorch checkpoints" #~ msgstr "" #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the `full code example " -#~ "`_ ." +#~ "For ensuring data instance-level privacy" +#~ " during local model training on the" +#~ " client side, consider leveraging privacy" +#~ " engines such as Opacus and " +#~ "TensorFlow Privacy. For examples of " +#~ "using Flower with these engines, please" +#~ " refer to the Flower examples " +#~ "directory (`Opacus " +#~ "`_, " +#~ "`Tensorflow Privacy " +#~ "`_)." #~ msgstr "" #~ msgid "" -#~ "Of course, this is a very basic" -#~ " example, and a lot can be " -#~ "added or modified, it was just to" -#~ " showcase how simply we could " -#~ "federate a Hugging Face workflow using" -#~ " Flower." +#~ "Flower comes with a number of " +#~ "popular federated learning strategies " +#~ "built-in. A built-in strategy can " +#~ "be instantiated as follows:" #~ msgstr "" #~ msgid "" -#~ "Note that in this example we used" -#~ " :code:`PyTorch`, but we could have " -#~ "very well used :code:`TensorFlow`." 
+#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ "``start_server`` function. It is usually " +#~ "recommended to adjust a few parameters" +#~ " during instantiation:" #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with PyTorch Lightning to train an " -#~ "Auto Encoder model on MNIST." +#~ "Existing strategies provide several ways " +#~ "to customize their behaviour. Callback " +#~ "functions allow strategies to call " +#~ "user-provided code during execution." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using PyTorch Lightning " -#~ "and Flower!" +#~ "The server can pass new configuration" +#~ " values to the client each round " +#~ "by providing a function to " +#~ "``on_fit_config_fn``. The provided function " +#~ "will be called by the strategy and" +#~ " must return a dictionary of " +#~ "configuration key values pairs that will" +#~ " be sent to the client. It must" +#~ " return a dictionary of arbitrary " +#~ "configuration values ``client.fit`` and " +#~ "``client.evaluate`` functions during each " +#~ "round of federated learning." #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "The ``on_fit_config_fn`` can be used to" +#~ " pass arbitrary configuration values from" +#~ " server to client, and potentially " +#~ "change these values each round, for " +#~ "example, to adjust the learning rate." +#~ " The client will receive the " +#~ "dictionary returned by the " +#~ "``on_fit_config_fn`` in its own " +#~ "``client.fit()`` function." +#~ msgstr "" + +#~ msgid "Legacy example guides" #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with TensorFlow to train a MobilNetV2" -#~ " model on CIFAR-10." +#~ "Welcome to Flower's documentation. 
`Flower " +#~ "`_ is a friendly federated" +#~ " AI framework." #~ msgstr "" -#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgid "flwr CLI" #~ msgstr "" -#~ msgid "Before Flower can be imported we have to install it:" +#~ msgid "flwr is the Flower command line interface." #~ msgstr "" -#~ msgid "" -#~ "Since we want to use the Keras " -#~ "API of TensorFlow (TF), we have to" -#~ " install TF as well:" +#~ msgid "Options" #~ msgstr "" -#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgid "Install completion for the current shell." #~ msgstr "" #~ msgid "" -#~ "We use the Keras utilities of TF" -#~ " to load CIFAR10, a popular colored" -#~ " image classification dataset for machine" -#~ " learning. The call to " -#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " -#~ "CIFAR10, caches it locally, and then " -#~ "returns the entire training and test " -#~ "set as NumPy ndarrays." +#~ "Show completion for the current shell," +#~ " to copy it or customize the " +#~ "installation." #~ msgstr "" -#~ msgid "" -#~ "Next, we need a model. For the " -#~ "purpose of this tutorial, we use " -#~ "MobilNetV2 with 10 output classes:" +#~ msgid "Build a Flower App into a Flower App Bundle (FAB)." #~ msgstr "" #~ msgid "" -#~ "The Flower server interacts with clients" -#~ " through an interface called " -#~ ":code:`Client`. When the server selects " -#~ "a particular client for training, it " -#~ "sends training instructions over the " -#~ "network. The client receives those " -#~ "instructions and calls one of the " -#~ ":code:`Client` methods to run your code" -#~ " (i.e., to train the neural network" -#~ " we defined earlier)." +#~ "You can run ``flwr build`` without " +#~ "any arguments to bundle the app " +#~ "located in the current directory. " +#~ "Alternatively, you can you can specify" +#~ " a path using the ``--app`` option" +#~ " to bundle an app located at " +#~ "the provided path. 
For example:" #~ msgstr "" -#~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses Keras." -#~ " The :code:`NumPyClient` interface defines " -#~ "three methods which can be implemented" -#~ " in the following way:" +#~ msgid "``flwr build --app ./apps/flower-hello-world``." +#~ msgstr "" + +#~ msgid "Path of the Flower App to bundle into a FAB" #~ msgstr "" -#~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`CifarClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ msgid "Install a Flower App Bundle." #~ msgstr "" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. If you implement" -#~ " a client of type :code:`NumPyClient` " -#~ "you'll need to first call its " -#~ ":code:`to_client()` method. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ msgid "It can be ran with a single FAB file argument:" #~ msgstr "" -#~ msgid "Each client will have its own dataset." +#~ msgid "``flwr install ./target_project.fab``" #~ msgstr "" -#~ msgid "" -#~ "You should now see how the " -#~ "training does in the very first " -#~ "terminal (the one that started the " -#~ "server):" +#~ msgid "The target install directory can be specified with ``--flwr-dir``:" #~ msgstr "" -#~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. 
The full `source code " -#~ "`_ for this can be " -#~ "found in :code:`examples/quickstart-" -#~ "tensorflow/client.py`." +#~ msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" #~ msgstr "" -#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgid "" +#~ "This will install ``target_project`` to " +#~ "``./docs/flwr/``. By default, ``flwr-dir`` " +#~ "is equal to:" #~ msgstr "" -#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" #~ msgstr "" -#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" #~ msgstr "" -#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgid "``$HOME/.flwr/`` in all other cases" #~ msgstr "" -#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgid "The desired install path." #~ msgstr "" -#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgid "Optional argument" +#~ msgstr "Argumento de compilação" + +#~ msgid "The source FAB file to install." #~ msgstr "" -#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgid "Get logs from a Flower project run." #~ msgstr "" -#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgid "Flag to stream or print logs from the Flower run" #~ msgstr "" -#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgid "default" #~ msgstr "" -#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgid "``True``" #~ msgstr "" -#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgid "Required argument" +#~ msgstr "Argumento de compilação" + +#~ msgid "The Flower run ID to query" #~ msgstr "" -#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgid "Path of the Flower project to run" #~ msgstr "" -#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgid "Name of the federation to run the app on" #~ msgstr "" -#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgid "Create new Flower App." 
#~ msgstr "" -#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgid "The ML framework to use" #~ msgstr "" -#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgid "options" #~ msgstr "" -#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgid "" +#~ "PyTorch | TensorFlow | sklearn | " +#~ "HuggingFace | JAX | MLX | NumPy" +#~ " | FlowerTune | Flower Baseline" #~ msgstr "" -#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgid "The Flower username of the author" #~ msgstr "" -#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgid "The name of the Flower App" +#~ msgstr "O nome do repositório da imagem base." + +#~ msgid "Run Flower App." #~ msgstr "" -#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgid "Override configuration key-value pairs, should be of the format:" #~ msgstr "" -#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgid "" +#~ "`--run-config 'key1=\"value1\" key2=\"value2\"' " +#~ "--run-config 'key3=\"value3\"'`" #~ msgstr "" -#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgid "" +#~ "Note that `key1`, `key2`, and `key3` " +#~ "in this example need to exist " +#~ "inside the `pyproject.toml` in order to" +#~ " be properly overriden." #~ msgstr "" -#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgid "" +#~ "Use `--stream` with `flwr run` to " +#~ "display logs; logs are not streamed " +#~ "by default." #~ msgstr "" -#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgid "``False``" +#~ msgstr "``FLWR_VERSION``" + +#~ msgid "Path of the Flower App to run." +#~ msgstr "O nome do repositório da imagem base." + +#~ msgid "Name of the federation to run the app on." 
#~ msgstr "" -#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgid "flower-simulation" #~ msgstr "" -#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgid "flower-superlink" #~ msgstr "" -#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgid "flower-supernode" #~ msgstr "" -#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgid "flower-server-app" #~ msgstr "" #~ msgid "" -#~ "The Visual Studio Code Remote - " -#~ "Containers extension lets you use a " -#~ "Docker container as a fully-featured " -#~ "development environment. It allows you " -#~ "to open any folder inside (or " -#~ "mounted into) a container and take " -#~ "advantage of Visual Studio Code's full" -#~ " feature set. A :code:`devcontainer.json` " -#~ "file in your project tells VS Code" -#~ " how to access (or create) a " -#~ "development container with a well-" -#~ "defined tool and runtime stack. This " -#~ "container can be used to run an" -#~ " application or to separate tools, " -#~ "libraries, or runtimes needed for " -#~ "working with a codebase." +#~ "Note that since version ``1.11.0``, " +#~ "``flower-server-app`` no longer supports" +#~ " passing a reference to a `ServerApp`" +#~ " attribute. Instead, you need to pass" +#~ " the path to Flower app via the" +#~ " argument ``--app``. This is the path" +#~ " to a directory containing a " +#~ "`pyproject.toml`. You can create a valid" +#~ " Flower app by executing ``flwr new``" +#~ " and following the prompt." #~ msgstr "" -#~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you don't have to do it. " -#~ "Usually it should be enough to " -#~ "install `Docker " -#~ "`_ on your " -#~ "system and ensure its available on " -#~ "your command line. Additionally, install " -#~ "the `VSCode Containers Extension " -#~ "`_." 
+#~ msgid "flower-superexec" #~ msgstr "" #~ msgid "" -#~ "If you prefer to use Anaconda for" -#~ " your virtual environment then install " -#~ "and setup the `conda " -#~ "`_ package. After setting" -#~ " it up you can create a virtual" -#~ " environment with:" +#~ ":py:obj:`Context `\\ \\(node\\_id\\," +#~ " node\\_config\\, state\\, run\\_config\\)" #~ msgstr "" -#~ msgid "The :code:`SecAgg+` abstraction" +#~ msgid "" +#~ "Holds records added by the entity " +#~ "in a given run and that will " +#~ "stay local. This means that the " +#~ "data it holds will never leave the" +#~ " system it's running from. This can" +#~ " be used as an intermediate storage" +#~ " or scratchpad when executing mods. " +#~ "It can also be used as a " +#~ "memory to access at different points " +#~ "during the lifecycle of this entity " +#~ "(e.g. across multiple rounds)" #~ msgstr "" -#~ msgid "The :code:`LightSecAgg` abstraction" +#~ msgid "" +#~ "A config (key/value mapping) held by " +#~ "the entity in a given run and " +#~ "that will stay local. It can be" +#~ " used at any point during the " +#~ "lifecycle of this entity (e.g. across" +#~ " multiple rounds)" #~ msgstr "" #~ msgid "" -#~ "A fork is a personal copy of " -#~ "a GitHub repository. To create one " -#~ "for Flower, you must navigate to " -#~ "``_ (while connected " -#~ "to your GitHub account) and click " -#~ "the ``Fork`` button situated on the " -#~ "top right of the page." +#~ ":py:obj:`RUN_SUPEREXEC_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "To check which files have been " -#~ "modified compared to the last version" -#~ " (last commit) and to see which " -#~ "files are staged for commit, you " -#~ "can use the :code:`git status` command." +#~ ":py:obj:`RUN_SUPEREXEC_LEAVE " +#~ "`\\" +#~ msgstr "" + +#~ msgid "Abstract base Driver class for the Driver API." 
#~ msgstr "" #~ msgid "" -#~ "Once you have added all the files" -#~ " you wanted to commit using " -#~ ":code:`git add`, you can finally create" -#~ " your commit using this command:" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*args\\, \\*\\*kwargs\\)" +#~ msgstr "" + +#~ msgid "Log error stating that module `ray` could not be imported." #~ msgstr "" #~ msgid "" -#~ "The \\ is there to " -#~ "explain to others what the commit " -#~ "does. It should be written in an" -#~ " imperative style and be concise. An" -#~ " example would be :code:`git commit " -#~ "-m \"Add images to README\"`." +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing JAX workload. " +#~ "We are using JAX to train a " +#~ "linear regression model on a scikit-" +#~ "learn dataset. We will structure the " +#~ "example similar to our `PyTorch - " +#~ "From Centralized To Federated " +#~ "`_ walkthrough. " +#~ "First, we build a centralized training" +#~ " approach based on the `Linear " +#~ "Regression with JAX " +#~ "`_" +#~ " tutorial`. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" #~ msgid "" -#~ ":doc:`Good first contributions `, where you" -#~ " should particularly look into the " -#~ ":code:`baselines` contributions." +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " ``jax``, ``jaxlib``, ``scikit-learn``, and" +#~ " ``flwr``:" +#~ msgstr "" + +#~ msgid "Linear Regression with JAX" #~ msgstr "" #~ msgid "" -#~ "Flower uses :code:`pyproject.toml` to manage" -#~ " dependencies and configure development " -#~ "tools (the ones which support it). " -#~ "Poetry is a build tool which " -#~ "supports `PEP 517 " -#~ "`_." +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a ``Linear Regression`` model. 
If" +#~ " you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." #~ msgstr "" #~ msgid "" -#~ "Install `xz` (to install different " -#~ "Python versions) and `pandoc` to build" -#~ " the docs::" +#~ "Let's create a new file called " +#~ "``jax_training.py`` with all the components" +#~ " required for a traditional (centralized)" +#~ " linear regression training. First, the " +#~ "JAX packages ``jax`` and ``jaxlib`` need" +#~ " to be imported. In addition, we " +#~ "need to import ``sklearn`` since we " +#~ "use ``make_regression`` for the dataset " +#~ "and ``train_test_split`` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the ``flwr`` package " +#~ "for federated learning. This will be " +#~ "done later." #~ msgstr "" #~ msgid "" -#~ "Ensure you system (Ubuntu 22.04+) is " -#~ "up-to-date, and you have all " -#~ "necessary packages::" +#~ "The ``load_data()`` function loads the " +#~ "mentioned training and test sets." #~ msgstr "" #~ msgid "" -#~ "1. Clone the `Flower repository " -#~ "`_ from GitHub::" +#~ "The model architecture (a very simple" +#~ " ``Linear Regression`` model) is defined" +#~ " in ``load_model()``." #~ msgstr "" #~ msgid "" -#~ "Let's create the Python environment for" -#~ " all-things Flower. If you wish " -#~ "to use :code:`pyenv`, we provide two " -#~ "convenience scripts that you can use." -#~ " If you prefer using something else" -#~ " than :code:`pyenv`, create a new " -#~ "environment, activate and skip to the" -#~ " last point where all packages are" -#~ " installed." +#~ "We now need to define the training" +#~ " (function ``train()``), which loops over" +#~ " the training set and measures the" +#~ " loss (function ``loss_fn()``) for each " +#~ "batch of training examples. 
The loss " +#~ "function is separate since JAX takes " +#~ "derivatives with a ``grad()`` function " +#~ "(defined in the ``main()`` function and" +#~ " called in ``train()``)." #~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, the following script that " -#~ "will install it, set it up, and" -#~ " create the virtual environment (with " -#~ ":code:`Python 3.9.20` by default)::" +#~ "The evaluation of the model is " +#~ "defined in the function ``evaluation()``. " +#~ "The function takes all test examples " +#~ "and measures the loss of the " +#~ "linear regression model." #~ msgstr "" #~ msgid "" -#~ "If you already have :code:`pyenv` " -#~ "installed (along with the :code:`pyenv-" -#~ "virtualenv` plugin), you can use the " -#~ "following convenience script (with " -#~ ":code:`Python 3.9.20` by default)::" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the ``jax.grad()`` function" +#~ " is defined in ``main()`` and passed" +#~ " to ``train()``." +#~ msgstr "" + +#~ msgid "You can now run your (centralized) JAX linear regression workload:" #~ msgstr "" #~ msgid "" -#~ "3. Install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ "So far this should all look fairly" +#~ " familiar if you've used JAX before." +#~ " Let's take the next step and " +#~ "use what we've built to create a" +#~ " simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." +#~ msgstr "" + +#~ msgid "JAX meets Flower" #~ msgstr "" #~ msgid "" -#~ "The Flower repository contains a number" -#~ " of convenience scripts to make " -#~ "recurring development tasks easier and " -#~ "less error-prone. See the :code:`/dev`" -#~ " subdirectory for a full list. 
The" -#~ " following scripts are amongst the " -#~ "most important ones:" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in ``jax_training.py`` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." #~ msgstr "" #~ msgid "" -#~ "If in a hurry, bypass the hook " -#~ "using ``--no-verify`` with the ``git " -#~ "commit`` command. ::" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined JAX training " +#~ "in ``jax_training.py``. Our *client* needs " +#~ "to import ``flwr``, but also ``jax`` " +#~ "and ``jaxlib`` to update the parameters" +#~ " on our JAX model:" #~ msgstr "" #~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "`_. Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``FlowerClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. 
``FlowerClient`` needs to implement" +#~ " four methods, two methods for " +#~ "getting/setting model parameters, one method" +#~ " for training the model, and one " +#~ "method for testing the model:" #~ msgstr "" -#~ msgid "" -#~ "Flower uses Poetry to build releases." -#~ " The necessary command is wrapped in" -#~ " a simple script::" +#~ msgid "``set_parameters (optional)``" #~ msgstr "" -#~ msgid "" -#~ "The resulting :code:`.whl` and :code:`.tar.gz`" -#~ " releases will be stored in the " -#~ ":code:`/dist` subdirectory." +#~ msgid "transform parameters to NumPy ``ndarray``'s" #~ msgstr "" -#~ msgid "" -#~ "Flower's documentation uses `Sphinx " -#~ "`_. There's no " -#~ "convenience script to re-build the " -#~ "documentation yet, but it's pretty " -#~ "easy::" +#~ msgid "get the updated local model parameters and return them to the server" #~ msgstr "" -#~ msgid "" -#~ "Some quickstart examples may have " -#~ "limitations or requirements that prevent " -#~ "them from running on every environment." -#~ " For more information, please see " -#~ "`Limitations`_." +#~ msgid "return the local loss to the server" #~ msgstr "" #~ msgid "" -#~ "Change the application code. For " -#~ "example, change the ``seed`` in " -#~ "``quickstart_docker/task.py`` to ``43`` and " -#~ "save it:" +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ "``DeviceArray`` to ``NumPy ndarray`` to " +#~ "make them compatible with `NumPyClient`." #~ msgstr "" #~ msgid "" -#~ "All files are revised based on " -#~ ":doc:`Example: PyTorch - From Centralized " -#~ "To Federated `. The only thing" -#~ " to do is modifying the file " -#~ "called :code:`cifar.py`, revised part is " -#~ "shown below:" +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``evaluate()`` " +#~ "previously defined in ``jax_training.py``. 
So" +#~ " what we really do here is we" +#~ " tell Flower through our ``NumPyClient``" +#~ " subclass which of our already " +#~ "defined functions to call for training" +#~ " and evaluation. We included type " +#~ "annotations to give you a better " +#~ "understanding of the data types that " +#~ "get passed around." #~ msgstr "" -#~ msgid "" -#~ "If you have read :doc:`Example: PyTorch" -#~ " - From Centralized To Federated " -#~ "`, the following parts are " -#~ "easy to follow, only :code:`get_parameters`" -#~ " and :code:`set_parameters` function in " -#~ ":code:`client.py` needed to revise. If " -#~ "not, please read the :doc:`Example: " -#~ "PyTorch - From Centralized To Federated" -#~ " `. first." +#~ msgid "Having defined the federation process, we can run it." #~ msgstr "" #~ msgid "" -#~ "Our example consists of one *server* " -#~ "and two *clients*. In FedBN, " -#~ ":code:`server.py` keeps unchanged, we can " -#~ "start the server directly." +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your JAX " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" #~ msgstr "" #~ msgid "" -#~ "Finally, we will revise our *client* " -#~ "logic by changing :code:`get_parameters` and" -#~ " :code:`set_parameters` in :code:`client.py`, we" -#~ " will exclude batch normalization " -#~ "parameters from model parameter list " -#~ "when sending to or receiving from " -#~ "the server." +#~ "The source code of this example " +#~ "was improved over time and can be" +#~ " found here: `Quickstart JAX " +#~ "`_. Our example is somewhat over-" +#~ "simplified because both clients load the" +#~ " same dataset." #~ msgstr "" #~ msgid "" -#~ "Let's create a new file called " -#~ ":code:`cifar.py` with all the components " -#~ "required for a traditional (centralized) " -#~ "training on CIFAR-10. 
First, all " -#~ "required packages (such as :code:`torch` " -#~ "and :code:`torchvision`) need to be " -#~ "imported. You can see that we do" -#~ " not import any package for federated" -#~ " learning. You can keep all these " -#~ "imports as they are even when we" -#~ " add the federated learning components " -#~ "at a later point." +#~ "You're now prepared to explore this " +#~ "topic further. How about using a " +#~ "more sophisticated model or using a " +#~ "different dataset? How about adding more" +#~ " clients?" #~ msgstr "" #~ msgid "" -#~ "As already mentioned we will use " -#~ "the CIFAR-10 dataset for this machine" -#~ " learning workload. The model architecture" -#~ " (a very simple Convolutional Neural " -#~ "Network) is defined in :code:`class " -#~ "Net()`." +#~ "In this tutorial, we will learn " +#~ "how to train a ``Logistic Regression``" +#~ " model on MNIST using Flower and " +#~ "scikit-learn." #~ msgstr "" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "CIFAR-10 training and test sets. The " -#~ ":code:`transform` normalized the data after" -#~ " loading." +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this :doc:`virtualenv `." #~ msgstr "" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set, measures the " -#~ "loss, backpropagates it, and then takes" -#~ " one optimizer step for each batch" -#~ " of training examples." +#~ "Our example consists of one *server* " +#~ "and two *clients* all having the " +#~ "same model." #~ msgstr "" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in the function :code:`test()`. " -#~ "The function loops over all test " -#~ "samples and measures the loss of " -#~ "the model based on the test " -#~ "dataset." +#~ "*Clients* are responsible for generating " +#~ "individual model parameter updates for " +#~ "the model based on their local " +#~ "datasets. 
These updates are then sent" +#~ " to the *server* which will aggregate" +#~ " them to produce an updated global" +#~ " model. Finally, the *server* sends " +#~ "this improved version of the model " +#~ "back to each *client*. A complete " +#~ "cycle of parameters updates is called" +#~ " a *round*." #~ msgstr "" #~ msgid "" -#~ "The concept is easy to understand. " -#~ "We have to start a *server* and" -#~ " then use the code in " -#~ ":code:`cifar.py` for the *clients* that " -#~ "are connected to the *server*. The " -#~ "*server* sends model parameters to the" -#~ " clients. The *clients* run the " -#~ "training and update the parameters. The" -#~ " updated parameters are sent back to" -#~ " the *server* which averages all " -#~ "received parameter updates. This describes " -#~ "one round of the federated learning " -#~ "process and we repeat this for " -#~ "multiple rounds." +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by " +#~ "running:" #~ msgstr "" -#~ msgid "" -#~ "Our example consists of one *server* " -#~ "and two *clients*. Let's set up " -#~ ":code:`server.py` first. The *server* needs" -#~ " to import the Flower package " -#~ ":code:`flwr`. Next, we use the " -#~ ":code:`start_server` function to start a " -#~ "server and tell it to perform " -#~ "three rounds of federated learning." +#~ msgid "Since we want to use scikit-learn, let's go ahead and install it:" #~ msgstr "" -#~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined centralized " -#~ "training in :code:`cifar.py`. 
Our *client* " -#~ "needs to import :code:`flwr`, but also" -#~ " :code:`torch` to update the parameters " -#~ "on our PyTorch model:" +#~ msgid "Or simply install all dependencies using Poetry:" #~ msgstr "" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`CifarClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or TensorFlow/Keras) because it " -#~ "avoids some of the boilerplate that " -#~ "would otherwise be necessary. " -#~ ":code:`CifarClient` needs to implement four" -#~ " methods, two methods for getting/setting" -#~ " model parameters, one method for " -#~ "training the model, and one method " -#~ "for testing the model:" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within ``utils.py``. 
The " +#~ "``utils.py`` contains different functions " +#~ "defining all the machine learning " +#~ "basics:" #~ msgstr "" -#~ msgid ":code:`set_parameters`" +#~ msgid "``get_model_parameters()``" #~ msgstr "" -#~ msgid "" -#~ "loop over the list of model " -#~ "parameters received as NumPy :code:`ndarray`'s" -#~ " (think list of neural network " -#~ "layers)" +#~ msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" #~ msgstr "" -#~ msgid ":code:`get_parameters`" +#~ msgid "``set_model_params()``" #~ msgstr "" -#~ msgid "" -#~ "get the model parameters and return " -#~ "them as a list of NumPy " -#~ ":code:`ndarray`'s (which is what " -#~ ":code:`flwr.client.NumPyClient` expects)" +#~ msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" #~ msgstr "" -#~ msgid ":code:`fit`" +#~ msgid "``set_initial_params()``" #~ msgstr "" -#~ msgid ":code:`evaluate`" +#~ msgid "Initializes the model parameters that the Flower server will ask for" #~ msgstr "" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`cifar.py`. So what we really do" -#~ " here is we tell Flower through " -#~ "our :code:`NumPyClient` subclass which of " -#~ "our already defined functions to call" -#~ " for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "Please check out ``utils.py`` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the ``client.py`` and imported. The" +#~ " ``client.py`` also requires to import " +#~ "several packages such as Flower and " +#~ "scikit-learn:" #~ msgstr "" #~ msgid "" -#~ "All that's left to do it to " -#~ "define a function that loads both " -#~ "model and data, creates a " -#~ ":code:`CifarClient`, and starts this client." 
-#~ " You load your data and model " -#~ "by using :code:`cifar.py`. Start " -#~ ":code:`CifarClient` with the function " -#~ ":code:`fl.client.start_client()` by pointing it " -#~ "at the same IP address we used " -#~ "in :code:`server.py`:" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The ``FederatedDataset.load_partition()`` method " +#~ "loads the partitioned training set for" +#~ " each partition ID defined in the " +#~ "``--partition-id`` argument." #~ msgstr "" #~ msgid "" -#~ "\\small\n" -#~ "\\frac{∆ \\times \\sqrt{2 \\times " -#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -#~ "\n" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ "``utils.set_initial_params()``." #~ msgstr "" #~ msgid "" -#~ "The :code:`Strategy` abstraction provides a" -#~ " method called :code:`evaluate` that can" -#~ " directly be used to evaluate the " -#~ "current global model parameters. The " -#~ "current server implementation calls " -#~ ":code:`evaluate` after parameter aggregation " -#~ "and before federated evaluation (see " -#~ "next paragraph)." +#~ "The Flower server interacts with clients" +#~ " through an interface called ``Client``." +#~ " When the server selects a particular" +#~ " client for training, it sends " +#~ "training instructions over the network. " +#~ "The client receives those instructions " +#~ "and calls one of the ``Client`` " +#~ "methods to run your code (i.e., to" +#~ " fit the logistic regression we " +#~ "defined earlier)." #~ msgstr "" #~ msgid "" -#~ "Client-side evaluation happens in the" -#~ " :code:`Client.evaluate` method and can be" -#~ " configured from the server side." 
+#~ "Flower provides a convenience class " +#~ "called ``NumPyClient`` which makes it " +#~ "easier to implement the ``Client`` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing ``NumPyClient`` " +#~ "usually means defining the following " +#~ "methods (``set_parameters`` is optional " +#~ "though):" #~ msgstr "" -#~ msgid "" -#~ ":code:`fraction_evaluate`: a :code:`float` defining" -#~ " the fraction of clients that will" -#~ " be selected for evaluation. If " -#~ ":code:`fraction_evaluate` is set to " -#~ ":code:`0.1` and :code:`100` clients are " -#~ "connected to the server, then :code:`10`" -#~ " will be randomly selected for " -#~ "evaluation. If :code:`fraction_evaluate` is " -#~ "set to :code:`0.0`, federated evaluation " -#~ "will be disabled." +#~ msgid "return the model weight as a list of NumPy ndarrays" +#~ msgstr "" + +#~ msgid "``set_parameters`` (optional)" #~ msgstr "" #~ msgid "" -#~ ":code:`min_evaluate_clients`: an :code:`int`: the" -#~ " minimum number of clients to be " -#~ "selected for evaluation. If " -#~ ":code:`fraction_evaluate` is set to " -#~ ":code:`0.1`, :code:`min_evaluate_clients` is set " -#~ "to 20, and :code:`100` clients are " -#~ "connected to the server, then :code:`20`" -#~ " clients will be selected for " -#~ "evaluation." +#~ "update the local model weights with " +#~ "the parameters received from the server" +#~ msgstr "" + +#~ msgid "is directly imported with ``utils.set_model_params()``" #~ msgstr "" -#~ msgid "" -#~ ":code:`min_available_clients`: an :code:`int` that" -#~ " defines the minimum number of " -#~ "clients which need to be connected " -#~ "to the server before a round of" -#~ " federated evaluation can start. If " -#~ "fewer than :code:`min_available_clients` are " -#~ "connected to the server, the server " -#~ "will wait until more clients are " -#~ "connected before it continues to sample" -#~ " clients for evaluation." 
+#~ msgid "set the local model weights" #~ msgstr "" -#~ msgid "" -#~ ":code:`on_evaluate_config_fn`: a function that " -#~ "returns a configuration dictionary which " -#~ "will be sent to the selected " -#~ "clients. The function will be called " -#~ "during each round and provides a " -#~ "convenient way to customize client-side" -#~ " evaluation from the server side, for" -#~ " example, to configure the number of" -#~ " validation steps performed." +#~ msgid "train the local model" #~ msgstr "" -#~ msgid "" -#~ "Model parameters can also be evaluated" -#~ " during training. :code:`Client.fit` can " -#~ "return arbitrary evaluation results as a" -#~ " dictionary:" +#~ msgid "return the updated local model weights" #~ msgstr "" -#~ msgid "" -#~ "The same :code:`Strategy`-customization approach " -#~ "can be used to aggregate custom " -#~ "evaluation results coming from individual " -#~ "clients. Clients can return custom " -#~ "metrics to the server by returning " -#~ "a dictionary:" +#~ msgid "test the local model" #~ msgstr "" -#~ msgid "Enable node authentication in :code:`SuperLink`" +#~ msgid "The methods can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "To enable node authentication, first you" -#~ " need to configure SSL/TLS connections " -#~ "to secure the SuperLink<>SuperNode " -#~ "communication. You can find the complete" -#~ " guide `here `_. After " -#~ "configuring secure connections, you can " -#~ "enable client authentication in a " -#~ "long-running Flower :code:`SuperLink`. 
Use " -#~ "the following terminal command to start" -#~ " a Flower :code:`SuperNode` that has " -#~ "both secure connections and node " -#~ "authentication enabled:" +#~ "We can now create an instance of" +#~ " our class ``MnistClient`` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "The first flag :code:`--auth-list-" -#~ "public-keys` expects a path to a " -#~ "CSV file storing all known node " -#~ "public keys. You need to store all" -#~ " known node public keys that are " -#~ "allowed to participate in a federation" -#~ " in one CSV file (:code:`.csv`)." +#~ "That's it for the client. We only" +#~ " have to implement ``Client`` or " +#~ "``NumPyClient`` and call " +#~ "``fl.client.start_client()``. If you implement " +#~ "a client of type ``NumPyClient`` you'll" +#~ " need to first call its " +#~ "``to_client()`` method. The string " +#~ "``\"0.0.0.0:8080\"`` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use ``\"0.0.0.0:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we pass to the client." #~ msgstr "" #~ msgid "" -#~ "The second and third flags :code" -#~ ":`--auth-superlink-private-key` and :code" -#~ ":`--auth-superlink-public-key` expect paths" -#~ " to the server's private and public" -#~ " keys. For development purposes, you " -#~ "can generate a private and public " -#~ "key pair using :code:`ssh-keygen -t " -#~ "ecdsa -b 384`." +#~ "The following Flower server is a " +#~ "little bit more advanced and returns " +#~ "an evaluation function for the " +#~ "server-side evaluation. First, we import" +#~ " again all required libraries such as" +#~ " Flower and scikit-learn." 
#~ msgstr "" -#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgid "``server.py``, import Flower and start the server:" #~ msgstr "" #~ msgid "" -#~ "Similar to the long-running Flower " -#~ "server (:code:`SuperLink`), you can easily " -#~ "enable node authentication in the " -#~ "long-running Flower client (:code:`SuperNode`)." -#~ " Use the following terminal command " -#~ "to start an authenticated :code:`SuperNode`:" +#~ "The number of federated learning rounds" +#~ " is set in ``fit_round()`` and the" +#~ " evaluation is defined in " +#~ "``get_evaluate_fn()``. The evaluation function " +#~ "is called after each federated learning" +#~ " round and gives you information " +#~ "about loss and accuracy. Note that " +#~ "we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." #~ msgstr "" #~ msgid "" -#~ "The :code:`--auth-supernode-private-key` " -#~ "flag expects a path to the node's" -#~ " private key file and the :code" -#~ ":`--auth-supernode-public-key` flag expects" -#~ " a path to the node's public " -#~ "key file. For development purposes, you" -#~ " can generate a private and public" -#~ " key pair using :code:`ssh-keygen -t" -#~ " ecdsa -b 384`." +#~ "The ``main`` contains the server-side" +#~ " parameter initialization " +#~ "``utils.set_initial_params()`` as well as the" +#~ " aggregation strategy ``fl.server.strategy:FedAvg()``." +#~ " The strategy is the default one, " +#~ "federated averaging (or FedAvg), with " +#~ "two clients and evaluation after each" +#~ " federated learning round. The server " +#~ "can be started with the command " +#~ "``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))``." 
#~ msgstr "" #~ msgid "" -#~ "You should now have learned how to" -#~ " start a long-running Flower server" -#~ " (:code:`SuperLink`) and client " -#~ "(:code:`SuperNode`) with node authentication " -#~ "enabled. You should also know the " -#~ "significance of the private key and " -#~ "store it safely to minimize security " -#~ "risks." +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We, " +#~ "therefore, have to start the server " +#~ "first:" #~ msgstr "" #~ msgid "" -#~ "The easiest way to send configuration" -#~ " values to clients is to use a" -#~ " built-in strategy like :code:`FedAvg`. " -#~ "Built-in strategies support so-called" -#~ " configuration functions. A configuration " -#~ "function is a function that the " -#~ "built-in strategy calls to get the" -#~ " configuration dictionary for the current" -#~ " round. It then forwards the " -#~ "configuration dictionary to all the " -#~ "clients selected during that round." +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Open a new terminal and " +#~ "start the first client:" #~ msgstr "" -#~ msgid "" -#~ "To make the built-in strategies " -#~ "use this function, we can pass it" -#~ " to ``FedAvg`` during initialization using" -#~ " the parameter :code:`on_fit_config_fn`:" +#~ msgid "Open another terminal and start the second client:" #~ msgstr "" -#~ msgid "The :code:`FedAvg` strategy will call this function *every round*." +#~ msgid "" +#~ "Each client will have its own " +#~ "dataset. You should now see how " +#~ "the training does in the very " +#~ "first terminal (the one that started " +#~ "the server):" #~ msgstr "" #~ msgid "" -#~ "This can be achieved by customizing " -#~ "an existing strategy or by " -#~ ":doc:`implementing a custom strategy from " -#~ "scratch `. 
" -#~ "Here's a nonsensical example that " -#~ "customizes :code:`FedAvg` by adding a " -#~ "custom ``\"hello\": \"world\"`` configuration " -#~ "key/value pair to the config dict " -#~ "of a *single client* (only the " -#~ "first client in the list, the " -#~ "other clients in this round to not" -#~ " receive this \"special\" config value):" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in ``examples/sklearn-logreg-" +#~ "mnist``." #~ msgstr "" -#~ msgid "" -#~ "containing relevant information including: log" -#~ " message level (e.g. :code:`INFO`, " -#~ ":code:`DEBUG`), a timestamp, the line " -#~ "where the logging took place from, " -#~ "as well as the log message itself." -#~ " In this way, the logger would " -#~ "typically display information on your " -#~ "terminal as follows:" +#~ msgid "Federated XGBoost" #~ msgstr "" -#~ msgid "" -#~ "By default, the Flower log is " -#~ "outputted to the terminal where you " -#~ "launch your Federated Learning workload " -#~ "from. This applies for both gRPC-" -#~ "based federation (i.e. when you do " -#~ ":code:`fl.server.start_server`) and when using " -#~ "the :code:`VirtualClientEngine` (i.e. when you" -#~ " do :code:`fl.simulation.start_simulation`). In " -#~ "some situations you might want to " -#~ "save this log to disk. You can " -#~ "do so by calling the " -#~ "`fl.common.logger.configure() " -#~ "`_" -#~ " function. For example:" +#~ msgid "Why federated XGBoost?" #~ msgstr "" #~ msgid "" -#~ "With the above, Flower will record " -#~ "the log you see on your terminal" -#~ " to :code:`log.txt`. This file will " -#~ "be created in the same directory " -#~ "as were you are running the code" -#~ " from. 
If we inspect we see the" -#~ " log above is also recorded but " -#~ "prefixing with :code:`identifier` each line:" +#~ "Indeed, as the demand for data " +#~ "privacy and decentralized learning grows, " +#~ "there's an increasing requirement to " +#~ "implement federated XGBoost systems for " +#~ "specialised applications, like survival " +#~ "analysis and financial fraud detection." #~ msgstr "" #~ msgid "" -#~ "The :code:`fl.common.logger.configure` function, " -#~ "also allows specifying a host to " -#~ "which logs can be pushed (via " -#~ ":code:`POST`) through a native Python " -#~ ":code:`logging.handler.HTTPHandler`. This is a " -#~ "particularly useful feature in " -#~ ":code:`gRPC`-based Federated Learning workloads " -#~ "where otherwise gathering logs from all" -#~ " entities (i.e. the server and the" -#~ " clients) might be cumbersome. Note " -#~ "that in Flower simulation, the server" -#~ " automatically displays all logs. You " -#~ "can still specify a :code:`HTTPHandler` " -#~ "should you wish to backup or " -#~ "analyze the logs somewhere else." +#~ "Federated learning ensures that raw data" +#~ " remains on the local device, making" +#~ " it an attractive approach for " +#~ "sensitive domains where data security " +#~ "and privacy are paramount. Given the " +#~ "robustness and efficiency of XGBoost, " +#~ "combining it with federated learning " +#~ "offers a promising solution for these" +#~ " specific challenges." #~ msgstr "" #~ msgid "" -#~ "This guide describes how to a " -#~ "SSL-enabled secure Flower server " -#~ "(:code:`SuperLink`) can be started and " -#~ "how a Flower client (:code:`SuperNode`) " -#~ "can establish a secure connections to" -#~ " it." +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " ``xgboost`` package. 
We use a simple" +#~ " example (`full code xgboost-quickstart " +#~ "`_) with two *clients* and " +#~ "one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" #~ msgid "" -#~ "The code example comes with a " -#~ ":code:`README.md` file which explains how " -#~ "to start it. Although it is " -#~ "already SSL-enabled, it might be " -#~ "less descriptive on how it does " -#~ "so. Stick to this guide for a " -#~ "deeper introduction to the topic." +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a :doc:`virtualenv " +#~ "`." #~ msgstr "" #~ msgid "" -#~ "Using SSL-enabled connections requires " -#~ "certificates to be passed to the " -#~ "server and client. For the purpose " -#~ "of this guide we are going to " -#~ "generate self-signed certificates. As " -#~ "this can become quite complex we " -#~ "are going to ask you to run " -#~ "the script in :code:`examples/advanced-" -#~ "tensorflow/certificates/generate.sh` with the " -#~ "following command sequence:" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. Now" +#~ " that we have all our dependencies" +#~ " installed, let's run a simple " +#~ "distributed training with two clients " +#~ "and one server." #~ msgstr "" #~ msgid "" -#~ "This will generate the certificates in" -#~ " :code:`examples/advanced-tensorflow/.cache/certificates`." +#~ "In a file called ``client.py``, import" +#~ " xgboost, Flower, Flower Datasets and " +#~ "other related functions:" +#~ msgstr "" + +#~ msgid "Dataset partition and hyper-parameter selection" #~ msgstr "" #~ msgid "" -#~ "When setting :code:`root_certificates`, the " -#~ "client expects a file path to " -#~ "PEM-encoded root certificates." 
+#~ "Prior to local training, we require " +#~ "loading the HIGGS dataset from Flower" +#~ " Datasets and conduct data partitioning " +#~ "for FL:" #~ msgstr "" -#~ msgid "The :code:`Strategy` abstraction" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (``IidPartitioner(num_partitions=30)``). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ "``partition_id``:" #~ msgstr "" #~ msgid "" -#~ "All strategy implementation are derived " -#~ "from the abstract base class " -#~ ":code:`flwr.server.strategy.Strategy`, both built-in" -#~ " implementations and third party " -#~ "implementations. This means that custom " -#~ "strategy implementations have the exact " -#~ "same capabilities at their disposal as" -#~ " built-in ones." +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for ``xgboost`` package." +#~ msgstr "" + +#~ msgid "Finally, we define the hyper-parameters used for XGBoost training." #~ msgstr "" #~ msgid "" -#~ "Creating a new strategy means " -#~ "implementing a new :code:`class` (derived " -#~ "from the abstract base class " -#~ ":code:`Strategy`) that implements for the " -#~ "previously shown abstract methods:" +#~ "The ``num_local_round`` represents the number" +#~ " of iterations for local tree boost." +#~ " We use CPU for the training in" +#~ " default. One can shift it to " +#~ "GPU by setting ``tree_method`` to " +#~ "``gpu_hist``. We use AUC as evaluation" +#~ " metric." #~ msgstr "" -#~ msgid "The :code:`initialize_parameters` method" +#~ msgid "Flower client definition for XGBoost" #~ msgstr "" #~ msgid "" -#~ ":code:`initialize_parameters` is called only " -#~ "once, at the very beginning of an" -#~ " execution. It is responsible for " -#~ "providing the initial global model " -#~ "parameters in a serialized form (i.e.," -#~ " as a :code:`Parameters` object)." 
+#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define ``XgbClient`` " +#~ "class inherited from ``fl.client.Client``." #~ msgstr "" #~ msgid "" -#~ "Built-in strategies return user-provided" -#~ " initial parameters. The following example" -#~ " shows how initial parameters can be" -#~ " passed to :code:`FedAvg`:" +#~ "All required parameters defined above " +#~ "are passed to ``XgbClient``'s constructor." #~ msgstr "" #~ msgid "" -#~ "The Flower server will call " -#~ ":code:`initialize_parameters`, which either returns" -#~ " the parameters that were passed to" -#~ " :code:`initial_parameters`, or :code:`None`. If" -#~ " no parameters are returned from " -#~ ":code:`initialize_parameters` (i.e., :code:`None`), " -#~ "the server will randomly select one " -#~ "client and ask it to provide its" -#~ " parameters. This is a convenience " -#~ "feature and not recommended in practice," -#~ " but it can be useful for " -#~ "prototyping. In practice, it is " -#~ "recommended to always use server-side" -#~ " parameter initialization." +#~ "Then, we override ``get_parameters``, ``fit``" +#~ " and ``evaluate`` methods insides " +#~ "``XgbClient`` class as follows." #~ msgstr "" -#~ msgid "The :code:`configure_fit` method" +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use ``get_parameters`` and" +#~ " ``set_parameters`` to initialise model " +#~ "parameters for XGBoost. As a result, " +#~ "let's return an empty tensor in " +#~ "``get_parameters`` when it is called by" +#~ " the server at the first round." #~ msgstr "" #~ msgid "" -#~ ":code:`configure_fit` is responsible for " -#~ "configuring the upcoming round of " -#~ "training. What does *configure* mean in" -#~ " this context? Configuring a round " -#~ "means selecting clients and deciding " -#~ "what instructions to send to these " -#~ "clients. 
The signature of " -#~ ":code:`configure_fit` makes this clear:" +#~ "In ``fit``, at the first round, we" +#~ " call ``xgb.train()`` to build up the" +#~ " first set of trees. From the " +#~ "second round, we load the global " +#~ "model sent from server to new " +#~ "build Booster object, and then update" +#~ " model weights on local training data" +#~ " with function ``local_boost`` as follows:" #~ msgstr "" #~ msgid "" -#~ "The return value is a list of " -#~ "tuples, each representing the instructions " -#~ "that will be sent to a particular" -#~ " client. Strategy implementations usually " -#~ "perform the following steps in " -#~ ":code:`configure_fit`:" +#~ "Now, we can create an instance of" +#~ " our class ``XgbClient`` and add one" +#~ " line to actually run this client:" #~ msgstr "" #~ msgid "" -#~ "Use the :code:`client_manager` to randomly " -#~ "sample all (or a subset of) " -#~ "available clients (each represented as a" -#~ " :code:`ClientProxy` object)" +#~ "That's it for the client. We only" +#~ " have to implement ``Client`` and " +#~ "call ``fl.client.start_client()``. The string " +#~ "``\"[::]:8080\"`` tells the client which " +#~ "server to connect to. In our case" +#~ " we can run the server and the" +#~ " client on the same machine, " +#~ "therefore we use ``\"[::]:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we point the client at." #~ msgstr "" #~ msgid "" -#~ "Pair each :code:`ClientProxy` with the " -#~ "same :code:`FitIns` holding the current " -#~ "global model :code:`parameters` and " -#~ ":code:`config` dict" +#~ "These updates are then sent to the" +#~ " *server* which will aggregate them " +#~ "to produce a better model. Finally, " +#~ "the *server* sends this improved version" +#~ " of the model back to each " +#~ "*client* to finish a complete FL " +#~ "round." 
#~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_fit` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the list returned from " -#~ ":code:`configure_fit`." +#~ "In a file named ``server.py``, import" +#~ " Flower and FedXgbBagging from " +#~ "``flwr.server.strategy``." +#~ msgstr "" + +#~ msgid "We first define a strategy for XGBoost bagging aggregation." #~ msgstr "" #~ msgid "" -#~ "The structure of this return value " -#~ "provides a lot of flexibility to " -#~ "the user. Since instructions are defined" -#~ " on a per-client basis, different " -#~ "instructions can be sent to each " -#~ "client. This enables custom strategies " -#~ "to train, for example, different models" -#~ " on different clients, or use " -#~ "different hyperparameters on different clients" -#~ " (via the :code:`config` dict)." +#~ "We use two clients for this " +#~ "example. An ``evaluate_metrics_aggregation`` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The ``config_func`` function is " +#~ "to return the current FL round " +#~ "number to client's ``fit()`` and " +#~ "``evaluate()`` methods." #~ msgstr "" -#~ msgid "The :code:`aggregate_fit` method" +#~ msgid "Then, we start the server:" #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit` is responsible for " -#~ "aggregating the results returned by the" -#~ " clients that were selected and asked" -#~ " to train in :code:`configure_fit`." +#~ msgid "Tree-based bagging aggregation" #~ msgstr "" #~ msgid "" -#~ "Of course, failures can happen, so " -#~ "there is no guarantee that the " -#~ "server will get results from all " -#~ "the clients it sent instructions to " -#~ "(via :code:`configure_fit`). 
:code:`aggregate_fit` " -#~ "therefore receives a list of " -#~ ":code:`results`, but also a list of " -#~ ":code:`failures`." +#~ "After traversal of all clients' models," +#~ " a new global model is generated, " +#~ "followed by the serialisation, and " +#~ "sending back to each client." #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit` returns an optional " -#~ ":code:`Parameters` object and a dictionary " -#~ "of aggregated metrics. The :code:`Parameters`" -#~ " return value is optional because " -#~ ":code:`aggregate_fit` might decide that the" -#~ " results provided are not sufficient " -#~ "for aggregation (e.g., too many " -#~ "failures)." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in ``metrics_distributed``. One can" +#~ " see that the average AUC increases" +#~ " over FL rounds." #~ msgstr "" -#~ msgid "The :code:`configure_evaluate` method" +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in ``examples/xgboost-quickstart``." #~ msgstr "" #~ msgid "" -#~ ":code:`configure_evaluate` is responsible for " -#~ "configuring the upcoming round of " -#~ "evaluation. What does *configure* mean " -#~ "in this context? Configuring a round " -#~ "means selecting clients and deciding " -#~ "what instructions to send to these " -#~ "clients. The signature of " -#~ ":code:`configure_evaluate` makes this clear:" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." 
+#~ " We also support :doc:`Flower simulation" +#~ " ` making it" +#~ " easy to simulate large client " +#~ "cohorts in a resource-aware manner. " +#~ "Let's take a look!" #~ msgstr "" -#~ msgid "" -#~ "The return value is a list of " -#~ "tuples, each representing the instructions " -#~ "that will be sent to a particular" -#~ " client. Strategy implementations usually " -#~ "perform the following steps in " -#~ ":code:`configure_evaluate`:" +#~ msgid "Cyclic training" #~ msgstr "" #~ msgid "" -#~ "Pair each :code:`ClientProxy` with the " -#~ "same :code:`EvaluateIns` holding the current" -#~ " global model :code:`parameters` and " -#~ ":code:`config` dict" +#~ "To do this, we first customise a" +#~ " ``ClientManager`` in ``server_utils.py``:" #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_evaluate` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the list returned from " -#~ ":code:`configure_evaluate`." +#~ "The customised ``ClientManager`` samples all" +#~ " available clients in each FL round" +#~ " based on the order of connection " +#~ "to the server. Then, we define a" +#~ " new strategy ``FedXgbCyclic`` in " +#~ "``flwr.server.strategy.fedxgb_cyclic.py``, in order " +#~ "to sequentially select only one client" +#~ " in given round and pass the " +#~ "received model to next client." +#~ msgstr "" + +#~ msgid "Customised data partitioning" #~ msgstr "" #~ msgid "" -#~ "The structure of this return value " -#~ "provides a lot of flexibility to " -#~ "the user. Since instructions are defined" -#~ " on a per-client basis, different " -#~ "instructions can be sent to each " -#~ "client. This enables custom strategies " -#~ "to evaluate, for example, different " -#~ "models on different clients, or use " -#~ "different hyperparameters on different clients" -#~ " (via the :code:`config` dict)." 
+#~ "In ``dataset.py``, we have a function" +#~ " ``instantiate_partitioner`` to instantiate the" +#~ " data partitioner based on the given" +#~ " ``num_partitions`` and ``partitioner_type``. " +#~ "Currently, we provide four supported " +#~ "partitioner type to simulate the " +#~ "uniformity/non-uniformity in data quantity " +#~ "(uniform, linear, square, exponential)." #~ msgstr "" -#~ msgid "The :code:`aggregate_evaluate` method" +#~ msgid "Customised centralised/distributed evaluation" #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_evaluate` is responsible for " -#~ "aggregating the results returned by the" -#~ " clients that were selected and asked" -#~ " to evaluate in :code:`configure_evaluate`." +#~ "To facilitate centralised evaluation, we " +#~ "define a function in ``server_utils.py``:" #~ msgstr "" #~ msgid "" -#~ "Of course, failures can happen, so " -#~ "there is no guarantee that the " -#~ "server will get results from all " -#~ "the clients it sent instructions to " -#~ "(via :code:`configure_evaluate`). " -#~ ":code:`aggregate_evaluate` therefore receives a " -#~ "list of :code:`results`, but also a " -#~ "list of :code:`failures`." +#~ "This function returns a evaluation " +#~ "function which instantiates a ``Booster`` " +#~ "object and loads the global model " +#~ "weights to it. The evaluation is " +#~ "conducted by calling ``eval_set()`` method," +#~ " and the tested AUC value is " +#~ "reported." #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_evaluate` returns an optional " -#~ ":code:`float` (loss) and a dictionary of" -#~ " aggregated metrics. The :code:`float` " -#~ "return value is optional because " -#~ ":code:`aggregate_evaluate` might decide that " -#~ "the results provided are not sufficient" -#~ " for aggregation (e.g., too many " -#~ "failures)." 
+#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ "``evaluate()`` method insides the " +#~ "``XgbClient`` class in ``client_utils.py``." #~ msgstr "" -#~ msgid "The :code:`evaluate` method" +#~ msgid "Flower simulation" #~ msgstr "" #~ msgid "" -#~ ":code:`evaluate` is responsible for evaluating" -#~ " model parameters on the server-side." -#~ " Having :code:`evaluate` in addition to " -#~ ":code:`configure_evaluate`/:code:`aggregate_evaluate` enables" -#~ " strategies to perform both servers-" -#~ "side and client-side (federated) " -#~ "evaluation." +#~ "We also provide an example code " +#~ "(``sim.py``) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" #~ msgid "" -#~ "The return value is again optional " -#~ "because the strategy might not need " -#~ "to implement server-side evaluation or" -#~ " because the user-defined :code:`evaluate`" -#~ " method might not complete successfully " -#~ "(e.g., it might fail to load the" -#~ " server-side evaluation data)." +#~ "After importing all required packages, " +#~ "we define a ``main()`` function to " +#~ "perform the simulation process:" #~ msgstr "" #~ msgid "" -#~ "Stable releases are available on `PyPI" -#~ " `_::" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ "``list``. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." 
+#~ msgstr "" + +#~ msgid "Then, we define the strategies and other hyper-parameters:" #~ msgstr "" #~ msgid "" -#~ "For simulations that use the Virtual " -#~ "Client Engine, ``flwr`` should be " -#~ "installed with the ``simulation`` extra::" +#~ "After that, we start the simulation " +#~ "by calling ``fl.simulation.start_simulation``:" #~ msgstr "" #~ msgid "" -#~ "If you have not added ``conda-" -#~ "forge`` to your channels, you will " -#~ "first need to run the following::" +#~ "One of key parameters for " +#~ "``start_simulation`` is ``client_fn`` which " +#~ "returns a function to construct a " +#~ "client. We define it as follows:" +#~ msgstr "" + +#~ msgid "Arguments parser" #~ msgstr "" #~ msgid "" -#~ "Once the ``conda-forge`` channel has " -#~ "been enabled, ``flwr`` can be installed" -#~ " with ``conda``::" +#~ "In ``utils.py``, we define the arguments" +#~ " parsers for clients, server and " +#~ "simulation, allowing users to specify " +#~ "different experimental settings. Let's first" +#~ " see the sever side:" #~ msgstr "" -#~ msgid "or with ``mamba``::" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ "``--centralised-eval``, the sever will do" +#~ " centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" + +#~ msgid "Then, the argument parser on client side:" #~ msgstr "" #~ msgid "" -#~ "New (possibly unstable) versions of " -#~ "Flower are sometimes available as " -#~ "pre-release versions (alpha, beta, release" -#~ " candidate) before the stable release " -#~ "happens::" +#~ "This defines various options for client" +#~ " data partitioning. 
Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " ``--centralised-eval``, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting ``--scaled-lr``." +#~ msgstr "" + +#~ msgid "We also have an argument parser for simulation:" #~ msgstr "" -#~ msgid "" -#~ "For simulations that use the Virtual " -#~ "Client Engine, ``flwr`` pre-releases " -#~ "should be installed with the " -#~ "``simulation`` extra::" +#~ msgid "This integrates all arguments for both client and server sides." #~ msgstr "" -#~ msgid "" -#~ "The latest (potentially unstable) changes " -#~ "in Flower are available as nightly " -#~ "releases::" +#~ msgid "Example commands" #~ msgstr "" #~ msgid "" -#~ "For simulations that use the Virtual " -#~ "Client Engine, ``flwr-nightly`` should " -#~ "be installed with the ``simulation`` " -#~ "extra::" +#~ "To run a centralised evaluated " +#~ "experiment with bagging strategy on 5" +#~ " clients with exponential distribution for" +#~ " 50 rounds, we first start the " +#~ "server as below:" #~ msgstr "" -#~ msgid "You can look at everything at ``_ ." +#~ msgid "Then, on each client terminal, we start the clients:" #~ msgstr "" -#~ msgid "" -#~ "After you finish the visualization, stop" -#~ " Prometheus and Grafana. This is " -#~ "important as they will otherwise block," -#~ " for example port :code:`3000` on " -#~ "your machine as long as they are" -#~ " running." +#~ msgid "To run the same experiment with Flower simulation:" #~ msgstr "" #~ msgid "" -#~ "In the example above, only one " -#~ "client will be run, so your " -#~ "clients won't run concurrently. Setting " -#~ ":code:`client_num_gpus = 0.5` would allow " -#~ "running two clients and therefore enable" -#~ " them to run concurrently. Be careful" -#~ " not to require more resources than" -#~ " available. 
If you specified " -#~ ":code:`client_num_gpus = 2`, the simulation" -#~ " wouldn't start (even if you had " -#~ "2 GPUs but decided to set 1 " -#~ "in :code:`ray_init_args`)." +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in ``examples" +#~ "/xgboost-comprehensive``." #~ msgstr "" #~ msgid "" -#~ "Q: I see \"This site can't be " -#~ "reached\" when going to " -#~ "``_." +#~ "🧑‍🏫 This tutorial starts at zero " +#~ "and expects no familiarity with " +#~ "federated learning. Only a basic " +#~ "understanding of data science and Python" +#~ " programming is assumed." #~ msgstr "" -#~ msgid "" -#~ "Ray Dashboard: ``_" +#~ msgid "Classic machine learning" #~ msgstr "" -#~ msgid "Ray Metrics: ``_" +#~ msgid "" +#~ "Before we begin to discuss federated " +#~ "learning, let us quickly recap how " +#~ "most machine learning works today." #~ msgstr "" -#~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_client " -#~ "`_) in the" -#~ " sense that they can be configure " -#~ "by creating a class inheriting, for " -#~ "example, from `flwr.client.NumPyClient `_ and therefore" -#~ " behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ msgid "|ac0a9766e26044d6aea222a829859b20|" #~ msgstr "" -#~ msgid "" -#~ "self-managed: this means that you " -#~ "as a user do not need to " -#~ "launch clients manually, instead this " -#~ "gets delegated to :code:`VirtualClientEngine`'s " -#~ "internals." +#~ msgid "|36cd6e248b1443ce8a82b5a025bba368|" #~ msgstr "" #~ msgid "" -#~ "The :code:`VirtualClientEngine` implements `virtual`" -#~ " clients using `Ray `_, " -#~ "an open-source framework for scalable" -#~ " Python workloads. 
In particular, Flower's" -#~ " :code:`VirtualClientEngine` makes use of " -#~ "`Actors `_ to spawn `virtual` clients" -#~ " and run their workload." +#~ "Now, in practice, the training data " +#~ "we work with doesn't originate on " +#~ "the machine we train the model on." +#~ " It gets created somewhere else." #~ msgstr "" #~ msgid "" -#~ "By default the VCE has access to" -#~ " all system resources (i.e. all CPUs," -#~ " all GPUs, etc) since that is " -#~ "also the default behavior when starting" -#~ " Ray. However, in some settings you" -#~ " might want to limit how many " -#~ "of your system resources are used " -#~ "for simulation. You can do this " -#~ "via the :code:`ray_init_args` input argument" -#~ " to :code:`start_simulation` which the VCE" -#~ " internally passes to Ray's " -#~ ":code:`ray.init` command. For a complete " -#~ "list of settings you can configure " -#~ "check the `ray.init `_ " -#~ "documentation. Do not set " -#~ ":code:`ray_init_args` if you want the " -#~ "VCE to use all your system's CPUs" -#~ " and GPUs." +#~ "It originates on a smartphone by " +#~ "the user interacting with an app, " +#~ "a car collecting sensor data, a " +#~ "laptop receiving input via the keyboard," +#~ " or a smart speaker listening to " +#~ "someone trying to sing a song." #~ msgstr "" -#~ msgid "" -#~ "By default the :code:`VirtualClientEngine` " -#~ "assigns a single CPU core (and " -#~ "nothing else) to each virtual client." -#~ " This means that if your system " -#~ "has 10 cores, that many virtual " -#~ "clients can be concurrently running." +#~ msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" #~ msgstr "" -#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#~ msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" #~ msgstr "" #~ msgid "" -#~ ":code:`num_gpus` indicates the **ratio** of" -#~ " GPU memory a client gets assigned." 
+#~ "So to use machine learning, or any" +#~ " kind of data analysis, the approach" +#~ " that has been used in the past" +#~ " was to collect all data on a" +#~ " central server. This server can be" +#~ " somewhere in a data center, or " +#~ "somewhere in the cloud." #~ msgstr "" -#~ msgid "" -#~ "While the :code:`client_resources` can be " -#~ "used to control the degree of " -#~ "concurrency in your FL simulation, this" -#~ " does not stop you from running " -#~ "dozens, hundreds or even thousands of" -#~ " clients in the same round and " -#~ "having orders of magnitude more " -#~ "`dormant` (i.e. not participating in a" -#~ " round) clients. Let's say you want" -#~ " to have 100 clients per round " -#~ "but your system can only accommodate " -#~ "8 clients concurrently. The " -#~ ":code:`VirtualClientEngine` will schedule 100 " -#~ "jobs to run (each simulating a " -#~ "client sampled by the strategy) and " -#~ "then will execute them in a " -#~ "resource-aware manner in batches of " -#~ "8." +#~ msgid "|7605632e1b0f49599ffacf841491fcfb|" #~ msgstr "" -#~ msgid "" -#~ "Flower's :code:`VirtualClientEngine` allows you " -#~ "to run FL simulations across multiple" -#~ " compute nodes. Before starting your " -#~ "multi-node simulation ensure that you:" +#~ msgid "|91b1b5a7d3484eb7a2350c1923f18307|" #~ msgstr "" #~ msgid "" -#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " -#~ "`start_simulation `_ so the " -#~ ":code:`VirtualClientEngine` attaches to a " -#~ "running Ray instance." +#~ "The classic machine learning approach " +#~ "we've just seen can be used in " +#~ "some cases. Great examples include " +#~ "categorizing holiday photos, or analyzing " +#~ "web traffic. Cases, where all the " +#~ "data is naturally available on a " +#~ "centralized server." #~ msgstr "" -#~ msgid "" -#~ "Start Ray on you head node: on " -#~ "the terminal type :code:`ray start " -#~ "--head`. 
This command will print a " -#~ "few lines, one of which indicates " -#~ "how to attach other nodes to the" -#~ " head node." +#~ msgid "|5405ed430e4746e28b083b146fb71731|" #~ msgstr "" -#~ msgid "" -#~ "Attach other nodes to the head " -#~ "node: copy the command shown after " -#~ "starting the head and execute it " -#~ "on terminal of a new node: for " -#~ "example :code:`ray start " -#~ "--address='192.168.1.132:6379'`" +#~ msgid "|a389e87dab394eb48a8949aa2397687b|" #~ msgstr "" #~ msgid "" -#~ "Once your simulation is finished, if " -#~ "you'd like to dismantle your cluster " -#~ "you simply need to run the command" -#~ " :code:`ray stop` in each node's " -#~ "terminal (including the head node)." +#~ "There are many reasons why the " +#~ "classic centralized machine learning approach" +#~ " does not work for a large " +#~ "number of highly important real-world" +#~ " use cases. Those reasons include:" #~ msgstr "" #~ msgid "" -#~ "User :code:`ray status` to check all " -#~ "nodes connected to your head node " -#~ "as well as the total resources " -#~ "available to the :code:`VirtualClientEngine`." +#~ "**Regulations**: GDPR (Europe), CCPA " +#~ "(California), PIPEDA (Canada), LGPD (Brazil)," +#~ " PDPL (Argentina), KVKK (Turkey), POPI " +#~ "(South Africa), FSS (Russia), CDPR " +#~ "(China), PDPB (India), PIPA (Korea), " +#~ "APPI (Japan), PDP (Indonesia), PDPA " +#~ "(Singapore), APP (Australia), and other " +#~ "regulations protect sensitive data from " +#~ "being moved. In fact, those regulations" +#~ " sometimes even prevent single " +#~ "organizations from combining their own " +#~ "users' data for artificial intelligence " +#~ "training because those users live in " +#~ "different parts of the world, and " +#~ "their data is governed by different " +#~ "data protection regulations." #~ msgstr "" #~ msgid "" -#~ "When attaching a new node to the" -#~ " head, all its resources (i.e. all" -#~ " CPUs, all GPUs) will be visible " -#~ "by the head node. 
This means that" -#~ " the :code:`VirtualClientEngine` can schedule " -#~ "as many `virtual` clients as that " -#~ "node can possible run. In some " -#~ "settings you might want to exclude " -#~ "certain resources from the simulation. " -#~ "You can do this by appending " -#~ "`--num-cpus=` and/or `--num-" -#~ "gpus=` in any :code:`ray " -#~ "start` command (including when starting " -#~ "the head)" +#~ "Sensitive healthcare records from multiple " +#~ "hospitals to train cancer detection " +#~ "models" #~ msgstr "" #~ msgid "" -#~ "The VCE assigns a share of GPU " -#~ "memory to a client that specifies " -#~ "the key :code:`num_gpus` in " -#~ ":code:`client_resources`. This being said, Ray" -#~ " (used internally by the VCE) is " -#~ "by default:" +#~ "Financial information from different " +#~ "organizations to detect financial fraud" #~ msgstr "" -#~ msgid "" -#~ "not aware of the total VRAM " -#~ "available on the GPUs. This means " -#~ "that if you set :code:`num_gpus=0.5` and" -#~ " you have two GPUs in your " -#~ "system with different (e.g. 32GB and " -#~ "8GB) VRAM amounts, they both would " -#~ "run 2 clients concurrently." +#~ msgid "Location data from your electric car to make better range prediction" #~ msgstr "" -#~ msgid "" -#~ "If you want to run several " -#~ "independent Flower simulations on the " -#~ "same machine you need to mask-out" -#~ " your GPUs with " -#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" -#~ " your experiment." +#~ msgid "End-to-end encrypted messages to train better auto-complete models" #~ msgstr "" -#~ msgid "" -#~ "In addition, the GPU resource limits " -#~ "passed to :code:`client_resources` are not " -#~ "`enforced` (i.e. they can be exceeded)" -#~ " which can result in the situation" -#~ " of client using more VRAM than " -#~ "the ratio specified when starting the" -#~ " simulation." 
+#~ msgid "Federated learning" #~ msgstr "" #~ msgid "" -#~ "This would need to be done in " -#~ "the main process (which is where " -#~ "the server would run) and in each" -#~ " Actor created by the VCE. By " -#~ "means of :code:`actor_kwargs` we can " -#~ "pass the reserved key `\"on_actor_init_fn\"`" -#~ " in order to specify a function " -#~ "to be executed upon actor " -#~ "initialization. In this case, to enable" -#~ " GPU growth for TF workloads. It " -#~ "would look as follows:" +#~ "Federated learning simply reverses this " +#~ "approach. It enables machine learning on" +#~ " distributed data by moving the " +#~ "training to the data, instead of " +#~ "moving the data to the training. " +#~ "Here's the single-sentence explanation:" #~ msgstr "" -#~ msgid "" -#~ "Model updates can be persisted on " -#~ "the server-side by customizing " -#~ ":code:`Strategy` methods. Implementing custom " -#~ "strategies is always an option, but " -#~ "for many cases it may be more " -#~ "convenient to simply customize an " -#~ "existing strategy. The following code " -#~ "example defines a new " -#~ ":code:`SaveModelStrategy` which customized the " -#~ "existing built-in :code:`FedAvg` strategy. " -#~ "In particular, it customizes " -#~ ":code:`aggregate_fit` by calling " -#~ ":code:`aggregate_fit` in the base class " -#~ "(:code:`FedAvg`). It then continues to " -#~ "save returned (aggregated) weights before " -#~ "it returns those aggregated weights to" -#~ " the caller (i.e., the server):" +#~ msgid "Central machine learning: move the data to the computation" #~ msgstr "" -#~ msgid "" -#~ "For central DP with server-side " -#~ "clipping, there are two :code:`Strategy` " -#~ "classes that act as wrappers around " -#~ "the actual :code:`Strategy` instance (for " -#~ "example, :code:`FedAvg`). 
The two wrapper " -#~ "classes are " -#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " -#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " -#~ "fixed and adaptive clipping." +#~ msgid "Federated (machine) learning: move the computation to the data" #~ msgstr "" #~ msgid "" -#~ "The code sample below enables the " -#~ ":code:`FedAvg` strategy to use server-" -#~ "side fixed clipping using the " -#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " -#~ "class. The same approach can be " -#~ "used with " -#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " -#~ "adjusting the corresponding input parameters." +#~ "By doing so, it enables us to " +#~ "use machine learning (and other data " +#~ "science approaches) in areas where it" +#~ " wasn't possible before. We can now" +#~ " train excellent medical AI models by" +#~ " enabling different hospitals to work " +#~ "together. We can solve financial fraud" +#~ " by training AI models on the " +#~ "data of different financial institutions. " +#~ "We can build novel privacy-enhancing " +#~ "applications (such as secure messaging) " +#~ "that have better built-in AI than" +#~ " their non-privacy-enhancing alternatives." +#~ " And those are just a few of" +#~ " the examples that come to mind. " +#~ "As we deploy federated learning, we " +#~ "discover more and more areas that " +#~ "can suddenly be reinvented because they" +#~ " now have access to vast amounts " +#~ "of previously inaccessible data." #~ msgstr "" #~ msgid "" -#~ "For central DP with client-side " -#~ "clipping, the server sends the clipping" -#~ " value to selected clients on each" -#~ " round. Clients can use existing " -#~ "Flower :code:`Mods` to perform the " -#~ "clipping. 
Two mods are available for " -#~ "fixed and adaptive client-side clipping:" -#~ " :code:`fixedclipping_mod` and " -#~ ":code:`adaptiveclipping_mod` with corresponding " -#~ "server-side wrappers " -#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " -#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +#~ "So how does federated learning work, " +#~ "exactly? Let's start with an intuitive" +#~ " explanation." +#~ msgstr "" + +#~ msgid "|89c412136a5146ec8dc32c0973729f12|" #~ msgstr "" #~ msgid "" -#~ "The code sample below enables the " -#~ ":code:`FedAvg` strategy to use differential" -#~ " privacy with client-side fixed " -#~ "clipping using both the " -#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " -#~ "class and, on the client, " -#~ ":code:`fixedclipping_mod`:" +#~ "Next, we send the parameters of " +#~ "the global model to the connected " +#~ "client nodes (think: edge devices like" +#~ " smartphones or servers belonging to " +#~ "organizations). This is to ensure that" +#~ " each participating node starts their " +#~ "local training using the same model " +#~ "parameters. We often use only a " +#~ "few of the connected nodes instead " +#~ "of all nodes. The reason for this" +#~ " is that selecting more and more " +#~ "client nodes has diminishing returns." #~ msgstr "" -#~ msgid "" -#~ "In addition to the server-side " -#~ "strategy wrapper, the :code:`ClientApp` needs" -#~ " to configure the matching " -#~ ":code:`fixedclipping_mod` to perform the " -#~ "client-side clipping:" +#~ msgid "|9503d3dc3a144e8aa295f8800cd8a766|" #~ msgstr "" -#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#~ msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +#~ msgstr "" + +#~ msgid "|a7579ad7734347508e959d9e14f2f53d|" #~ msgstr "" #~ msgid "" -#~ "Flower allows full customization of the" -#~ " learning process through the " -#~ ":code:`Strategy` abstraction. 
A number of " -#~ "built-in strategies are provided in " -#~ "the core framework." +#~ "In order to get one single model," +#~ " we have to combine all the " +#~ "model updates we received from the " +#~ "client nodes. This process is called " +#~ "*aggregation*, and there are many " +#~ "different ways to do it. The most" +#~ " basic way to do it is called" +#~ " *Federated Averaging* (`McMahan et al.," +#~ " 2016 `__), often " +#~ "abbreviated as *FedAvg*. *FedAvg* takes " +#~ "the 100 model updates and, as the" +#~ " name suggests, averages them. To be" +#~ " more precise, it takes the *weighted" +#~ " average* of the model updates, " +#~ "weighted by the number of examples " +#~ "each client used for training. The " +#~ "weighting is important to make sure " +#~ "that each data example has the " +#~ "same \"influence\" on the resulting " +#~ "global model. If one client has 10" +#~ " examples, and another client has 100" +#~ " examples, then - without weighting -" +#~ " each of the 10 examples would " +#~ "influence the global model ten times " +#~ "as much as each of the 100 " +#~ "examples." #~ msgstr "" -#~ msgid "Use an existing strategy, for example, :code:`FedAvg`" +#~ msgid "|73d15dd1d4fc41678b2d54815503fbe8|" #~ msgstr "" -#~ msgid "" -#~ "This creates a strategy with all " -#~ "parameters left at their default values" -#~ " and passes it to the " -#~ ":code:`start_server` function. It is usually" -#~ " recommended to adjust a few " -#~ "parameters during instantiation:" +#~ msgid "Federated analytics" #~ msgstr "" -#~ msgid "" -#~ "The server can pass new configuration" -#~ " values to the client each round " -#~ "by providing a function to " -#~ ":code:`on_fit_config_fn`. The provided function " -#~ "will be called by the strategy and" -#~ " must return a dictionary of " -#~ "configuration key values pairs that will" -#~ " be sent to the client. 
It must" -#~ " return a dictionary of arbitrary " -#~ "configuration values :code:`client.fit` and " -#~ ":code:`client.evaluate` functions during each " -#~ "round of federated learning." +#~ msgid "|55472eef61274ba1b739408607e109df|" #~ msgstr "" #~ msgid "" -#~ "The :code:`on_fit_config_fn` can be used " -#~ "to pass arbitrary configuration values " -#~ "from server to client, and potentially" -#~ " change these values each round, for" -#~ " example, to adjust the learning " -#~ "rate. The client will receive the " -#~ "dictionary returned by the " -#~ ":code:`on_fit_config_fn` in its own " -#~ ":code:`client.fit()` function." +#~ "Run ``python3 src/py/flwr_tool/update_changelog.py " +#~ "`` in order to add every" +#~ " new change to the changelog (feel" +#~ " free to make manual changes to " +#~ "the changelog afterwards until it looks" +#~ " good)." #~ msgstr "" #~ msgid "" -#~ "Similar to :code:`on_fit_config_fn`, there is" -#~ " also :code:`on_evaluate_config_fn` to customize" -#~ " the configuration sent to " -#~ ":code:`client.evaluate()`" +#~ "When operating in a production " +#~ "environment, it is strongly recommended " +#~ "to enable Transport Layer Security (TLS)" +#~ " for each Flower Component to ensure" +#~ " secure communication." #~ msgstr "" #~ msgid "" -#~ "Server-side evaluation can be enabled" -#~ " by passing an evaluation function to" -#~ " :code:`evaluate_fn`." +#~ "To enable TLS, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." +#~ msgstr "" + +#~ msgid "SuperLink" #~ msgstr "" #~ msgid "" -#~ "Note that since version :code:`1.11.0`, " -#~ ":code:`flower-server-app` no longer " -#~ "supports passing a reference to a " -#~ "`ServerApp` attribute. Instead, you need " -#~ "to pass the path to Flower app " -#~ "via the argument :code:`--app`. This is" -#~ " the path to a directory containing" -#~ " a `pyproject.toml`. 
You can create a" -#~ " valid Flower app by executing " -#~ ":code:`flwr new` and following the " -#~ "prompt." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container:" #~ msgstr "" #~ msgid "" -#~ "The following examples are available as" -#~ " standalone projects. Quickstart TensorFlow/Keras" -#~ " ---------------------------" +#~ "``--volume ./certificates/:/app/certificates/:ro``: Mount" +#~ " the ``certificates`` directory in" #~ msgstr "" #~ msgid "" -#~ "Let's create a new application project" -#~ " in Xcode and add :code:`flwr` as " -#~ "a dependency in your project. For " -#~ "our application, we will store the " -#~ "logic of our app in " -#~ ":code:`FLiOSModel.swift` and the UI elements" -#~ " in :code:`ContentView.swift`. We will " -#~ "focus more on :code:`FLiOSModel.swift` in " -#~ "this quickstart. Please refer to the " -#~ "`full code example " -#~ "`_ to " -#~ "learn more about the app." +#~ "the current working directory of the " +#~ "host machine as a read-only volume" +#~ " at the" #~ msgstr "" -#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgid "``/app/certificates`` directory inside the container." #~ msgstr "" -#~ msgid "" -#~ "Then add the mlmodel to the " -#~ "project simply by drag-and-drop, " -#~ "the mlmodel will be bundled inside " -#~ "the application during deployment to " -#~ "your iOS device. We need to pass" -#~ " the url to access mlmodel and " -#~ "run CoreML machine learning processes, " -#~ "it can be retrieved by calling the" -#~ " function :code:`Bundle.main.url`. For the " -#~ "MNIST dataset, we need to preprocess " -#~ "it into :code:`MLBatchProvider` object. The" -#~ " preprocessing is done inside " -#~ ":code:`DataLoader.swift`." 
+#~ msgid "SuperNode" #~ msgstr "" #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this information beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ "Assuming that the ``ca.crt`` certificate " +#~ "already exists locally, we can use " +#~ "the flag ``--volume`` to mount the " +#~ "local certificate into the container's " +#~ "``/app/`` directory." #~ msgstr "" #~ msgid "" -#~ "Then start the Flower gRPC client " -#~ "and start communicating to the server" -#~ " by passing our Flower client to " -#~ "the function :code:`startFlwrGRPC`." +#~ "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the " +#~ "``ca.crt`` file from the" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ "call the provided :code:`MLFlwrClient` and " -#~ "call :code:`startFlwrGRPC()`. The attribute " -#~ ":code:`hostname` and :code:`port` tells the" -#~ " client which server to connect to." -#~ " This can be done by entering " -#~ "the hostname and port in the " -#~ "application before clicking the start " -#~ "button to start the federated learning" -#~ " process." +#~ "current working directory of the host" +#~ " machine as a read-only volume " +#~ "at the ``/app/ca.crt``" #~ msgstr "" -#~ msgid "" -#~ "For simple workloads we can start " -#~ "a Flower server and leave all the" -#~ " configuration possibilities at their " -#~ "default values. In a file named " -#~ ":code:`server.py`, import Flower and start " -#~ "the server:" +#~ msgid "SuperExec" #~ msgstr "" #~ msgid "" -#~ "Congratulations! 
You've successfully built and" -#~ " run your first federated learning " -#~ "system in your ios device. The " -#~ "full `source code " -#~ "`_ for" -#~ " this example can be found in " -#~ ":code:`examples/ios`." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory where" +#~ " the SuperExec will be executed from," +#~ " we can use the flag ``--volume`` " +#~ "to mount the local directory into " +#~ "the ``/app/certificates/`` directory of the" +#~ " container:" #~ msgstr "" #~ msgid "" -#~ "Before we start building our JAX " -#~ "example, we need install the packages" -#~ " :code:`jax`, :code:`jaxlib`, :code:`scikit-" -#~ "learn`, and :code:`flwr`:" +#~ ":substitution-code:`flwr/superexec:|stable_flwr_version|`: " +#~ "The name of the image to be " +#~ "run and the specific" +#~ msgstr "" + +#~ msgid "SuperExec." #~ msgstr "" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Linear Regression` model. " -#~ "If you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `JAX" -#~ " documentation `_." +#~ "``--ssl-certfile certificates/server.pem``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" #~ msgid "" -#~ "Let's create a new file called " -#~ ":code:`jax_training.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) linear regression training. " -#~ "First, the JAX packages :code:`jax` and" -#~ " :code:`jaxlib` need to be imported. " -#~ "In addition, we need to import " -#~ ":code:`sklearn` since we use " -#~ ":code:`make_regression` for the dataset and" -#~ " :code:`train_test_split` to split the " -#~ "dataset into a training and test " -#~ "set. You can see that we do " -#~ "not yet import the :code:`flwr` package" -#~ " for federated learning. This will be" -#~ " done later." 
+#~ "The ``certificates/server.pem`` file is used" +#~ " to identify the SuperExec and to " +#~ "encrypt the" #~ msgstr "" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "mentioned training and test sets." +#~ "``--ssl-keyfile certificates/server.key``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" #~ msgid "" -#~ "The model architecture (a very simple" -#~ " :code:`Linear Regression` model) is " -#~ "defined in :code:`load_model()`." +#~ "``--executor-config root-" +#~ "certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify" +#~ " the" #~ msgstr "" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`), which loops " -#~ "over the training set and measures " -#~ "the loss (function :code:`loss_fn()`) for " -#~ "each batch of training examples. The " -#~ "loss function is separate since JAX " -#~ "takes derivatives with a :code:`grad()` " -#~ "function (defined in the :code:`main()` " -#~ "function and called in :code:`train()`)." +#~ "location of the CA certificate file " +#~ "inside the container that the SuperExec" +#~ " executor" +#~ msgstr "" + +#~ msgid "should use to verify the SuperLink's identity." +#~ msgstr "" + +#~ msgid "Run ClientApp as a Subprocess" #~ msgstr "" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in the function :code:`evaluation()`." -#~ " The function takes all test examples" -#~ " and measures the loss of the " -#~ "linear regression model." +#~ "In this mode, the ClientApp is " +#~ "executed as a subprocess within the " +#~ "SuperNode Docker container, rather than " +#~ "running in a separate container. This" +#~ " approach reduces the number of " +#~ "running containers, which can be " +#~ "beneficial for environments with limited " +#~ "resources. However, it also means that" +#~ " the ClientApp is no longer isolated" +#~ " from the SuperNode, which may " +#~ "introduce additional security concerns." 
#~ msgstr "" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model using JAX. As " -#~ "already mentioned, the :code:`jax.grad()` " -#~ "function is defined in :code:`main()` " -#~ "and passed to :code:`train()`." +#~ "Before running the ClientApp as a " +#~ "subprocess, ensure that the FAB " +#~ "dependencies have been installed in the" +#~ " SuperNode images. This can be done" +#~ " by extending the SuperNode image:" +#~ msgstr "" + +#~ msgid "Dockerfile.supernode" #~ msgstr "" #~ msgid "" -#~ "The concept of federating an existing" -#~ " workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`jax_training.py` for the" -#~ " *clients* that are connected to the" -#~ " *server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server*, which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process, and we " -#~ "repeat this for multiple rounds." +#~ "Next, build the SuperNode Docker image" +#~ " by running the following command in" +#~ " the directory where Dockerfile is " +#~ "located:" +#~ msgstr "" + +#~ msgid "Run the ClientApp as a Subprocess" #~ msgstr "" #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined JAX training" -#~ " in :code:`jax_training.py`. 
Our *client* " -#~ "needs to import :code:`flwr`, but also" -#~ " :code:`jax` and :code:`jaxlib` to update" -#~ " the parameters on our JAX model:" +#~ "Start the SuperNode with the flag " +#~ "``--isolation subprocess``, which tells the" +#~ " SuperNode to execute the ClientApp " +#~ "as a subprocess:" +#~ msgstr "" + +#~ msgid "Run the example and follow the logs of the ServerApp:" #~ msgstr "" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`FlowerClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "JAX) because it avoids some of the" -#~ " boilerplate that would otherwise be " -#~ "necessary. :code:`FlowerClient` needs to " -#~ "implement four methods, two methods for" -#~ " getting/setting model parameters, one " -#~ "method for training the model, and " -#~ "one method for testing the model:" +#~ "That is all it takes! You can " +#~ "monitor the progress of the run " +#~ "through the logs of the SuperExec." +#~ msgstr "" + +#~ msgid "" +#~ "You will learn how to run the " +#~ "Flower client and server components on" +#~ " two separate machines, with Flower " +#~ "configured to use TLS encryption and " +#~ "persist SuperLink state across restarts. " +#~ "A server consists of a SuperLink " +#~ "and ``SuperExec``. For more details " +#~ "about the Flower architecture, refer to" +#~ " the :doc:`../explanation-flower-architecture`" +#~ " explainer page." #~ msgstr "" -#~ msgid ":code:`set_parameters (optional)`" +#~ msgid "" +#~ "First, set the environment variables " +#~ "``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the" +#~ " IP address from the remote machine." 
+#~ " For example, if the IP is " +#~ "``192.168.2.33``, execute:" #~ msgstr "" -#~ msgid "transform parameters to NumPy :code:`ndarray`'s" +#~ msgid "" +#~ "Log into the remote machine using " +#~ "``ssh`` and run the following command" +#~ " to start the SuperLink and SuperExec" +#~ " services:" #~ msgstr "" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the JAX model parameters from " -#~ ":code:`DeviceArray` to :code:`NumPy ndarray` " -#~ "to make them compatible with " -#~ "`NumPyClient`." +#~ "Specify the remote SuperExec IP " +#~ "addresses and the path to the root" +#~ " certificate in the ``[tool.flwr.federations" +#~ ".remote-superexec]`` table in the " +#~ "``pyproject.toml`` file. Here, we have " +#~ "named our remote federation ``remote-" +#~ "superexec``:" +#~ msgstr "" + +#~ msgid "Run the project and follow the ServerApp logs:" #~ msgstr "" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`evaluate()` previously defined in " -#~ ":code:`jax_training.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "``-p 9091:9091 -p 9092:9092``: Map port" +#~ " ``9091`` and ``9092`` of the " +#~ "container to the same port of" +#~ msgstr "" + +#~ msgid "the host machine, allowing other services to access the Driver API on" #~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Logistic " -#~ "Regression` model on MNIST using Flower" -#~ " and scikit-learn." +#~ "``http://localhost:9091`` and the Fleet API" +#~ " on ``http://localhost:9092``." 
+#~ msgstr "" + +#~ msgid "Step 3: Start the SuperNode" #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. However, before" -#~ " setting up the client and server," -#~ " we will define all functionalities " -#~ "that we need for our federated " -#~ "learning setup within :code:`utils.py`. The" -#~ " :code:`utils.py` contains different functions" -#~ " defining all the machine learning " -#~ "basics:" +#~ "``flwr/supernode:|stable_flwr_version|``: This is " +#~ "the name of the image to be " +#~ "run and the specific tag" #~ msgstr "" -#~ msgid ":code:`get_model_parameters()`" +#~ msgid "" +#~ "``--supernode-address 0.0.0.0:9094``: Set the" +#~ " address and port number that the " +#~ "SuperNode" #~ msgstr "" -#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgid "is listening on." #~ msgstr "" -#~ msgid ":code:`set_model_params()`" +#~ msgid "Step 4: Start the ClientApp" #~ msgstr "" -#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgid "" +#~ "The ClientApp Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own ClientApp image. In" +#~ " order to install the FAB " +#~ "dependencies, you will need to create" +#~ " a Dockerfile that extends the " +#~ "ClientApp image and installs the " +#~ "required dependencies." #~ msgstr "" -#~ msgid ":code:`set_initial_params()`" +#~ msgid "" +#~ "Create a ClientApp Dockerfile called " +#~ "``Dockerfile.clientapp`` and paste the " +#~ "following code into it:" +#~ msgstr "" + +#~ msgid "Dockerfile.clientapp" #~ msgstr "" #~ msgid "" -#~ "Please check out :code:`utils.py` `here " -#~ "`_ for more details. " -#~ "The pre-defined functions are used " -#~ "in the :code:`client.py` and imported. 
" -#~ "The :code:`client.py` also requires to " -#~ "import several packages such as Flower" -#~ " and scikit-learn:" +#~ "to be built from is the " +#~ "``flwr/clientapp image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" #~ msgid "" -#~ "Prior to local training, we need " -#~ "to load the MNIST dataset, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning, " -#~ "and partition the dataset for FL. " -#~ "This can be conveniently achieved using" -#~ " `Flower Datasets `_." -#~ " The :code:`FederatedDataset.load_partition()` method" -#~ " loads the partitioned training set " -#~ "for each partition ID defined in " -#~ "the :code:`--partition-id` argument." +#~ "``--supernode supernode-1:9094``: Connect to " +#~ "the SuperNode's Fleet API at the " +#~ "address" +#~ msgstr "" + +#~ msgid "``supernode-1:9094``." +#~ msgstr "" + +#~ msgid "Step 5: Start the SuperExec" #~ msgstr "" #~ msgid "" -#~ "Next, the logistic regression model is" -#~ " defined and initialized with " -#~ ":code:`utils.set_initial_params()`." +#~ "The procedure for building and running" +#~ " a SuperExec image is almost " +#~ "identical to the ClientApp image." #~ msgstr "" #~ msgid "" -#~ "The Flower server interacts with clients" -#~ " through an interface called " -#~ ":code:`Client`. When the server selects " -#~ "a particular client for training, it " -#~ "sends training instructions over the " -#~ "network. The client receives those " -#~ "instructions and calls one of the " -#~ ":code:`Client` methods to run your code" -#~ " (i.e., to fit the logistic " -#~ "regression we defined earlier)." +#~ "Similar to the ClientApp image, you " +#~ "will need to create a Dockerfile " +#~ "that extends the SuperExec image and " +#~ "installs the required FAB dependencies." 
#~ msgstr "" #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses " -#~ "scikit-learn. Implementing :code:`NumPyClient` " -#~ "usually means defining the following " -#~ "methods (:code:`set_parameters` is optional " -#~ "though):" +#~ "Create a SuperExec Dockerfile called " +#~ "``Dockerfile.superexec`` and paste the " +#~ "following code in:" #~ msgstr "" -#~ msgid ":code:`set_parameters` (optional)" +#~ msgid "Dockerfile.superexec" #~ msgstr "" -#~ msgid "is directly imported with :code:`utils.set_model_params()`" +#~ msgid "" +#~ ":substitution-code:`FROM " +#~ "flwr/superexec:|stable_flwr_version|`: This line " +#~ "specifies that the Docker image" #~ msgstr "" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MnistClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "to be built from is the " +#~ "``flwr/superexec image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. If you implement" -#~ " a client of type :code:`NumPyClient` " -#~ "you'll need to first call its " -#~ ":code:`to_client()` method. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." 
+#~ "``ENTRYPOINT [\"flower-superexec\"``: Set the" +#~ " command ``flower-superexec`` to be" #~ msgstr "" -#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +#~ msgstr "" + +#~ msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." #~ msgstr "" #~ msgid "" -#~ "The number of federated learning rounds" -#~ " is set in :code:`fit_round()` and " -#~ "the evaluation is defined in " -#~ ":code:`get_evaluate_fn()`. The evaluation function" -#~ " is called after each federated " -#~ "learning round and gives you information" -#~ " about loss and accuracy. Note that" -#~ " we also make use of Flower " -#~ "Datasets here to load the test " -#~ "split of the MNIST dataset for " -#~ "server-side evaluation." +#~ "Afterward, in the directory that holds" +#~ " the Dockerfile, execute this Docker " +#~ "command to build the SuperExec image:" +#~ msgstr "" + +#~ msgid "Start the SuperExec container:" #~ msgstr "" #~ msgid "" -#~ "The :code:`main` contains the server-" -#~ "side parameter initialization " -#~ ":code:`utils.set_initial_params()` as well as " -#~ "the aggregation strategy " -#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" -#~ " the default one, federated averaging " -#~ "(or FedAvg), with two clients and " -#~ "evaluation after each federated learning " -#~ "round. The server can be started " -#~ "with the command " -#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -#~ "strategy=strategy, " -#~ "config=fl.server.ServerConfig(num_rounds=3))`." +#~ "``-p 9093:9093``: Map port ``9093`` of" +#~ " the container to the same port " +#~ "of" #~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/sklearn-logreg-" -#~ "mnist`." 
+#~ "the host machine, allowing you to " +#~ "access the SuperExec API on " +#~ "``http://localhost:9093``." +#~ msgstr "" + +#~ msgid "``--name superexec``: Assign the name ``superexec`` to the container." #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a federated XGBoost model" -#~ " on HIGGS dataset using Flower and" -#~ " :code:`xgboost` package. We use a " -#~ "simple example (`full code xgboost-" -#~ "quickstart `_) with two *clients* " -#~ "and one *server* to demonstrate how " -#~ "federated XGBoost works, and then we " -#~ "dive into a more complex example " -#~ "(`full code xgboost-comprehensive " -#~ "`_) to run various experiments." +#~ "``flwr_superexec:0.0.1``: This is the name " +#~ "of the image to be run and " +#~ "the specific tag" #~ msgstr "" #~ msgid "" -#~ "Since we want to use :code:`xgboost` " -#~ "package to build up XGBoost trees, " -#~ "let's go ahead and install " -#~ ":code:`xgboost`:" +#~ "``--executor-config superlink=\\\"superlink:9091\\\"``:" +#~ " Configure the SuperExec executor to" +#~ msgstr "" + +#~ msgid "connect to the SuperLink running on port ``9091``." +#~ msgstr "" + +#~ msgid "Stop the current ClientApp containers:" +#~ msgstr "" + +#~ msgid "Launch two new ClientApp containers based on the newly built image:" #~ msgstr "" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import xgboost, Flower, Flower Datasets " -#~ "and other related functions:" +#~ "Setting the ``PROJECT_DIR`` helps Docker " +#~ "Compose locate the ``pyproject.toml`` file," +#~ " allowing it to install dependencies " +#~ "in the SuperExec and SuperNode images" +#~ " correctly." #~ msgstr "" #~ msgid "" -#~ "In this example, we split the " -#~ "dataset into 30 partitions with uniform" -#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." 
-#~ " Then, we load the partition for " -#~ "the given client based on " -#~ ":code:`partition_id`:" +#~ "To ensure the ``flwr`` CLI connects " +#~ "to the SuperExec, you need to " +#~ "specify the SuperExec addresses in the" +#~ " ``pyproject.toml`` file." #~ msgstr "" #~ msgid "" -#~ "After that, we do train/test splitting" -#~ " on the given partition (client's " -#~ "local data), and transform data format" -#~ " for :code:`xgboost` package." +#~ "Run the quickstart example, monitor the" +#~ " ServerApp logs and wait for the " +#~ "summary to appear:" +#~ msgstr "" + +#~ msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +#~ msgstr "" + +#~ msgid "Step 7: Add another SuperNode" #~ msgstr "" #~ msgid "" -#~ "The functions of :code:`train_test_split` and" -#~ " :code:`transform_dataset_to_dmatrix` are defined " -#~ "as below:" +#~ "You can add more SuperNodes and " +#~ "ClientApps by duplicating their definitions" +#~ " in the ``compose.yml`` file." #~ msgstr "" #~ msgid "" -#~ "The :code:`num_local_round` represents the " -#~ "number of iterations for local tree " -#~ "boost. We use CPU for the training" -#~ " in default. One can shift it " -#~ "to GPU by setting :code:`tree_method` to" -#~ " :code:`gpu_hist`. We use AUC as " -#~ "evaluation metric." +#~ "Just give each new SuperNode and " +#~ "ClientApp service a unique service name" +#~ " like ``supernode-3``, ``clientapp-3``, etc." +#~ msgstr "" + +#~ msgid "In ``compose.yml``, add the following:" #~ msgstr "" #~ msgid "" -#~ "After loading the dataset we define " -#~ "the Flower client. We follow the " -#~ "general rule to define :code:`XgbClient` " -#~ "class inherited from :code:`fl.client.Client`." +#~ "If you also want to enable TLS " +#~ "for the new SuperNodes, duplicate the" +#~ " SuperNode definition for each new " +#~ "SuperNode service in the ``with-" +#~ "tls.yml`` file." 
#~ msgstr "" #~ msgid "" -#~ "All required parameters defined above " -#~ "are passed to :code:`XgbClient`'s constructor." +#~ "Make sure that the names of the" +#~ " services match with the one in " +#~ "the ``compose.yml`` file." +#~ msgstr "" + +#~ msgid "In ``with-tls.yml``, add the following:" +#~ msgstr "" + +#~ msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +#~ msgstr "" + +#~ msgid "Enable SSL connections" #~ msgstr "" #~ msgid "" -#~ "Then, we override :code:`get_parameters`, " -#~ ":code:`fit` and :code:`evaluate` methods " -#~ "insides :code:`XgbClient` class as follows." +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server " +#~ "(``SuperLink``) can be started and how" +#~ " a Flower client (``SuperNode``) can " +#~ "establish a secure connections to it." #~ msgstr "" #~ msgid "" -#~ "Unlike neural network training, XGBoost " -#~ "trees are not started from a " -#~ "specified random weights. In this case," -#~ " we do not use :code:`get_parameters` " -#~ "and :code:`set_parameters` to initialise model" -#~ " parameters for XGBoost. As a result," -#~ " let's return an empty tensor in " -#~ ":code:`get_parameters` when it is called " -#~ "by the server at the first round." +#~ "The code example comes with a " +#~ "``README.md`` file which explains how to" +#~ " start it. Although it is already " +#~ "SSL-enabled, it might be less " +#~ "descriptive on how it does so. " +#~ "Stick to this guide for a deeper" +#~ " introduction to the topic." +#~ msgstr "" + +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. 
As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in ``examples/advanced-" +#~ "tensorflow/certificates/generate.sh`` with the " +#~ "following command sequence:" +#~ msgstr "" + +#~ msgid "" +#~ "The approach for generating SSL " +#~ "certificates in the context of this " +#~ "example can serve as an inspiration " +#~ "and starting point, but it should " +#~ "not be used as a reference for " +#~ "production environments. Please refer to " +#~ "other sources regarding the issue of " +#~ "correctly generating certificates for " +#~ "production environments. For non-critical " +#~ "prototyping or research projects, it " +#~ "might be sufficient to use the " +#~ "self-signed certificates generated using " +#~ "the scripts mentioned in this guide." #~ msgstr "" #~ msgid "" -#~ "In :code:`fit`, at the first round, " -#~ "we call :code:`xgb.train()` to build up" -#~ " the first set of trees. From " -#~ "the second round, we load the " -#~ "global model sent from server to " -#~ "new build Booster object, and then " -#~ "update model weights on local training" -#~ " data with function :code:`local_boost` as" -#~ " follows:" +#~ "Use the following terminal command to" +#~ " start a sever (SuperLink) that uses" +#~ " the previously generated certificates:" #~ msgstr "" -#~ msgid "" -#~ "Given :code:`num_local_round`, we update trees" -#~ " by calling :code:`bst_input.update` method. " -#~ "After training, the last " -#~ ":code:`N=num_local_round` trees will be " -#~ "extracted to send to the server." +#~ msgid "Client (SuperNode)" #~ msgstr "" #~ msgid "" -#~ "In :code:`evaluate`, after loading the " -#~ "global model, we call :code:`bst.eval_set` " -#~ "function to conduct evaluation on valid" -#~ " set. The AUC value will be " -#~ "returned." 
+#~ "You should now have learned how to" +#~ " generate self-signed certificates using" +#~ " the given script, start an SSL-" +#~ "enabled server and have a client " +#~ "establish a secure connection to it." #~ msgstr "" #~ msgid "" -#~ "Now, we can create an instance of" -#~ " our class :code:`XgbClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "This guide is for users who have" +#~ " already worked with Flower 0.x and" +#~ " want to upgrade to Flower 1.0. " +#~ "Newer versions of Flower (1.12+) are " +#~ "based on a new architecture (previously" +#~ " called Flower Next) and not covered" +#~ " in this guide. After upgrading " +#~ "Flower 0.x projects to Flower 1.0, " +#~ "please refer to :doc:`Upgrade to Flower" +#~ " Next ` to make your project compatible" +#~ " with the lastest version of Flower." #~ msgstr "" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` and " -#~ "call :code:`fl.client.start_client()`. The string" -#~ " :code:`\"[::]:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ msgid "Upgrade to Flower Next" #~ msgstr "" #~ msgid "" -#~ "In a file named :code:`server.py`, " -#~ "import Flower and FedXgbBagging from " -#~ ":code:`flwr.server.strategy`." +#~ "Welcome to the migration guide for " +#~ "updating Flower to Flower Next! Whether" +#~ " you're a seasoned user or just " +#~ "getting started, this guide will help" +#~ " you smoothly transition your existing " +#~ "setup to take advantage of the " +#~ "latest features and improvements in " +#~ "Flower Next, starting from version 1.8." 
#~ msgstr "" #~ msgid "" -#~ "We use two clients for this " -#~ "example. An :code:`evaluate_metrics_aggregation` " -#~ "function is defined to collect and " -#~ "wighted average the AUC values from " -#~ "clients. The :code:`config_func` function is" -#~ " to return the current FL round " -#~ "number to client's :code:`fit()` and " -#~ ":code:`evaluate()` methods." +#~ "This guide shows how to reuse " +#~ "pre-``1.8`` Flower code with minimum " +#~ "code changes by using the *compatibility" +#~ " layer* in Flower Next. In another" +#~ " guide, we will show how to run" +#~ " Flower Next end-to-end with " +#~ "pure Flower Next APIs." #~ msgstr "" #~ msgid "" -#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," -#~ " we define :code:`FedXgbBagging` inherited " -#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" -#~ " override the :code:`aggregate_fit`, " -#~ ":code:`aggregate_evaluate` and :code:`evaluate` " -#~ "methods as follows:" +#~ "Here's how to update an existing " +#~ "installation of Flower to Flower Next" +#~ " with ``pip``:" #~ msgstr "" -#~ msgid "" -#~ "In :code:`aggregate_fit`, we sequentially " -#~ "aggregate the clients' XGBoost trees by" -#~ " calling :code:`aggregate()` function:" +#~ msgid "or if you need Flower Next with simulation:" #~ msgstr "" -#~ msgid "" -#~ "In this function, we first fetch " -#~ "the number of trees and the number" -#~ " of parallel trees for the current" -#~ " and previous model by calling " -#~ ":code:`_get_tree_nums`. Then, the fetched " -#~ "information will be aggregated. After " -#~ "that, the trees (containing model " -#~ "weights) are aggregated to generate a" -#~ " new tree model." +#~ msgid "Using Poetry" #~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated XGBoost " -#~ "system. The AUC values can be " -#~ "checked in :code:`metrics_distributed`. One " -#~ "can see that the average AUC " -#~ "increases over FL rounds." 
+#~ "Update the ``flwr`` dependency in " +#~ "``pyproject.toml`` and then reinstall (don't" +#~ " forget to delete ``poetry.lock`` via " +#~ "``rm poetry.lock`` before running ``poetry " +#~ "install``)." #~ msgstr "" #~ msgid "" -#~ "The full `source code " -#~ "`_ for this example can be" -#~ " found in :code:`examples/xgboost-quickstart`." +#~ "Ensure you set the following version " +#~ "constraint in your ``pyproject.toml``:" #~ msgstr "" #~ msgid "" -#~ "To do this, we first customise a" -#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ "In Flower Next, the *infrastructure* and" +#~ " *application layers* have been decoupled." +#~ " Instead of starting a client in " +#~ "code via ``start_client()``, you create " +#~ "a |clientapp_link|_ and start it via " +#~ "the command line. Instead of starting" +#~ " a server in code via " +#~ "``start_server()``, you create a " +#~ "|serverapp_link|_ and start it via the" +#~ " command line. The long-running " +#~ "components of server and client are " +#~ "called SuperLink and SuperNode. The " +#~ "following non-breaking changes that " +#~ "require manual updates and allow you " +#~ "to run your project both in the" +#~ " traditional way and in the Flower" +#~ " Next way:" #~ msgstr "" #~ msgid "" -#~ "The customised :code:`ClientManager` samples " -#~ "all available clients in each FL " -#~ "round based on the order of " -#~ "connection to the server. Then, we " -#~ "define a new strategy :code:`FedXgbCyclic` " -#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " -#~ "order to sequentially select only one" -#~ " client in given round and pass " -#~ "the received model to next client." +#~ "Wrap your existing client with " +#~ "|clientapp_link|_ instead of launching it " +#~ "via |startclient_link|_. Here's an example:" #~ msgstr "" #~ msgid "" -#~ "Unlike the original :code:`FedAvg`, we " -#~ "don't perform aggregation here. 
Instead, " -#~ "we just make a copy of the " -#~ "received client model as global model" -#~ " by overriding :code:`aggregate_fit`." +#~ "Wrap your existing strategy with " +#~ "|serverapp_link|_ instead of starting the " +#~ "server via |startserver_link|_. Here's an " +#~ "example:" #~ msgstr "" #~ msgid "" -#~ "Also, the customised :code:`configure_fit` and" -#~ " :code:`configure_evaluate` methods ensure the" -#~ " clients to be sequentially selected " -#~ "given FL round:" +#~ "Run the ``SuperLink`` using " +#~ "|flowernext_superlink_link|_ before running, in " +#~ "sequence, |flowernext_clientapp_link|_ (2x) and " +#~ "|flowernext_serverapp_link|_. There is no need" +#~ " to execute `client.py` and `server.py` " +#~ "as Python scripts." #~ msgstr "" #~ msgid "" -#~ "In :code:`dataset.py`, we have a " -#~ "function :code:`instantiate_partitioner` to " -#~ "instantiate the data partitioner based " -#~ "on the given :code:`num_partitions` and " -#~ ":code:`partitioner_type`. Currently, we provide " -#~ "four supported partitioner type to " -#~ "simulate the uniformity/non-uniformity in " -#~ "data quantity (uniform, linear, square, " -#~ "exponential)." +#~ "Here's an example to start the " +#~ "server without HTTPS (only for " +#~ "prototyping):" #~ msgstr "" #~ msgid "" -#~ "To facilitate centralised evaluation, we " -#~ "define a function in :code:`server_utils.py`:" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--ssl-ca-certfile``," +#~ " ``--ssl-certfile``, and ``--ssl-keyfile``" +#~ " command line options to pass paths" +#~ " to (CA certificate, server certificate," +#~ " and server private key)." #~ msgstr "" -#~ msgid "" -#~ "This function returns a evaluation " -#~ "function which instantiates a :code:`Booster`" -#~ " object and loads the global model" -#~ " weights to it. The evaluation is " -#~ "conducted by calling :code:`eval_set()` " -#~ "method, and the tested AUC value " -#~ "is reported." 
+#~ msgid "Simulation in CLI" #~ msgstr "" #~ msgid "" -#~ "As for distributed evaluation on the " -#~ "clients, it's same as the quick-" -#~ "start example by overriding the " -#~ ":code:`evaluate()` method insides the " -#~ ":code:`XgbClient` class in :code:`client_utils.py`." +#~ "Wrap your existing client and strategy" +#~ " with |clientapp_link|_ and |serverapp_link|_," +#~ " respectively. There is no need to" +#~ " use |startsim_link|_ anymore. Here's an" +#~ " example:" #~ msgstr "" #~ msgid "" -#~ "We also provide an example code " -#~ "(:code:`sim.py`) to use the simulation " -#~ "capabilities of Flower to simulate " -#~ "federated XGBoost training on either a" -#~ " single machine or a cluster of " -#~ "machines." +#~ "Run |flower_simulation_link|_ in CLI and " +#~ "point to the ``server_app`` / " +#~ "``client_app`` object in the code " +#~ "instead of executing the Python script." +#~ " Here's an example (assuming the " +#~ "``server_app`` and ``client_app`` objects are" +#~ " in a ``sim.py`` module):" #~ msgstr "" #~ msgid "" -#~ "After importing all required packages, " -#~ "we define a :code:`main()` function to" -#~ " perform the simulation process:" +#~ "Set default resources for each " +#~ "|clientapp_link|_ using the ``--backend-" +#~ "config`` command line argument instead " +#~ "of setting the ``client_resources`` argument" +#~ " in |startsim_link|_. Here's an example:" #~ msgstr "" -#~ msgid "" -#~ "We first load the dataset and " -#~ "perform data partitioning, and the " -#~ "pre-processed data is stored in a " -#~ ":code:`list`. After the simulation begins, " -#~ "the clients won't need to pre-" -#~ "process their partitions again." +#~ msgid "Simulation in a Notebook" #~ msgstr "" #~ msgid "" -#~ "After that, we start the simulation " -#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ "Run |runsim_link|_ in your notebook " +#~ "instead of |startsim_link|_. 
Here's an " +#~ "example:" #~ msgstr "" #~ msgid "" -#~ "One of key parameters for " -#~ ":code:`start_simulation` is :code:`client_fn` which" -#~ " returns a function to construct a" -#~ " client. We define it as follows:" +#~ "Some official `Flower code examples " +#~ "`_ are already " +#~ "updated to Flower Next so they can" +#~ " serve as a reference for using " +#~ "the Flower Next API. If there are" +#~ " further questions, `join the Flower " +#~ "Slack `_ and " +#~ "use the channel ``#questions``. You can" +#~ " also `participate in Flower Discuss " +#~ "`_ where you can " +#~ "find us answering questions, or share" +#~ " and learn from others about " +#~ "migrating to Flower Next." #~ msgstr "" #~ msgid "" -#~ "In :code:`utils.py`, we define the " -#~ "arguments parsers for clients, server " -#~ "and simulation, allowing users to " -#~ "specify different experimental settings. Let's" -#~ " first see the sever side:" +#~ "As we continuously enhance Flower Next" +#~ " at a rapid pace, we'll be " +#~ "periodically updating this guide. Please " +#~ "feel free to share any feedback " +#~ "with us!" #~ msgstr "" #~ msgid "" -#~ "This allows user to specify training " -#~ "strategies / the number of total " -#~ "clients / FL rounds / participating " -#~ "clients / clients for evaluation, and" -#~ " evaluation fashion. Note that with " -#~ ":code:`--centralised-eval`, the sever will " -#~ "do centralised evaluation and all " -#~ "functionalities for client evaluation will " -#~ "be disabled." +#~ "This function is deprecated since " +#~ "1.13.0. Use :code: `flwr run` to " +#~ "start a Flower simulation." #~ msgstr "" -#~ msgid "" -#~ "This defines various options for client" -#~ " data partitioning. 
Besides, clients also" -#~ " have an option to conduct evaluation" -#~ " on centralised test set by setting" -#~ " :code:`--centralised-eval`, as well as " -#~ "an option to perform scaled learning " -#~ "rate based on the number of " -#~ "clients by setting :code:`--scaled-lr`." +#~ msgid "|c9344c3dfee24383908fabaac40a8504|" #~ msgstr "" -#~ msgid "" -#~ "The full `code " -#~ "`_ for this comprehensive " -#~ "example can be found in :code:`examples" -#~ "/xgboost-comprehensive`." +#~ msgid "|c10cd8f2177641bd8091c7b76d318ff9|" #~ msgstr "" -#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgid "|3c59c315e67945ea8b839381c5deb6c2|" #~ msgstr "" -#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgid "|eadf87e1e20549789512f7aa9199fcff|" #~ msgstr "" -#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgid "|66ce8f21aeb443fca1fc88f727458417|" #~ msgstr "" -#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgid "|f5768015a1014396b4761bb6cb3677f5|" #~ msgstr "" -#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgid "|a746aa3f56064617a4e00f4c6a0cb140|" #~ msgstr "" -#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgid "|cf8f676dd3534a44995c1b40910fd030|" #~ msgstr "" -#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgid "|d1c0e3a4c9dc4bfd88ee6f1fe626edaf|" #~ msgstr "" -#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgid "|1d8d6298a4014ec3a717135bcc7a94f9|" #~ msgstr "" -#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgid "|e3ea79200ff44d459358b9f4713e582b|" #~ msgstr "" -#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgid "|3e1061718a4a49d485764d30a4bfecdd|" #~ msgstr "" -#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgid "|7750e597d1ea4e319f7e0a40539bf214|" #~ msgstr "" -#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgid "|dd4434075f374e99ac07f509a883778f|" #~ msgstr "" -#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgid "Other changes" #~ msgstr "" -#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgid 
"|cf5fe148406b44b9a8b842fb01b5a7ea|" #~ msgstr "" -#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgid "|ba25c91426d64cc1ae2d3febc5715b35|" #~ msgstr "" -#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgid "|fca67f83aaab4389aa9ebb4d9c5cd75e|" #~ msgstr "" -#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgid "|6f2e8f95c95443379b0df00ca9824654|" #~ msgstr "" -#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgid "|c0ab3a1a733d4dbc9e1677aa608e8038|" #~ msgstr "" -#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgid "|8f0491bde07341ab9f2e23d50593c0be|" #~ msgstr "" -#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgid "|762fc099899943688361562252c5e600|" #~ msgstr "" -#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgid "|f62d365fd0ae405b975d3ca01e7183fd|" #~ msgstr "" -#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgid "|2c78fc1816b143289f4d909388f92a80|" #~ msgstr "" -#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgid "|4230725aeebe497d8ad84a3efc2a912b|" #~ msgstr "" -#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgid "|64b66a88417240eabe52f5cc55d89d0b|" #~ msgstr "" -#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgid "|726c8eca58bc4f859b06aa24a587b253|" #~ msgstr "" -#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgid "|f9d869e4b33c4093b29cf24ed8dff80a|" #~ msgstr "" -#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgid "|4ab50bc01a9f426a91a2c0cbc3ab7a84|" #~ msgstr "" -#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgid "" +#~ "Until the Flower core library matures" +#~ " it will be easier to get PR's" +#~ " accepted if they only touch non-" +#~ "core areas of the codebase. 
Good " +#~ "candidates to get started are:" +#~ msgstr "" + +#~ msgid "Request for Flower Baselines" #~ msgstr "" #~ msgid "" -#~ "If you don't have ``pyenv`` installed," -#~ " the following script that will " -#~ "install it, set it up, and create" -#~ " the virtual environment (with ``Python " -#~ "3.9.20`` by default):" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." #~ msgstr "" #~ msgid "" -#~ "If you already have ``pyenv`` installed" -#~ " (along with the ``pyenv-virtualenv`` " -#~ "plugin), you can use the following " -#~ "convenience script (with ``Python 3.9.20`` " -#~ "by default):" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignees," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" #~ msgstr "" -#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgid "" +#~ "Otherwise, if you don't find a " +#~ "baseline you'd like to work on, be" +#~ " sure to open a new issue with" +#~ " the baseline request template!" #~ msgstr "" -#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgid "Request for examples" #~ msgstr "" -#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgid "" +#~ "We wish we had more time to " +#~ "write usage examples because we believe" +#~ " they help users to get started " +#~ "with building what they want to " +#~ "build. 
Here are a few ideas where" +#~ " we'd be happy to accept a PR:" #~ msgstr "" -#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" #~ msgstr "" -#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgid "Android ONNX on-device training" #~ msgstr "" -#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgid "|f150b8d6e0074250822c9f6f7a8de3e0|" #~ msgstr "" -#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgid "|72772d10debc4abd8373c0bc82985422|" #~ msgstr "" -#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgid "|5815398552ad41d290a3a2631fe8f6ca|" #~ msgstr "" -#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgid "|e6ac20744bf149378be20ac3dc309356|" #~ msgstr "" -#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgid "|a4011ef443c14725b15a8cf33b0e3443|" #~ msgstr "" -#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgid "|a22faa3617404c06803731525e1c609f|" #~ msgstr "" -#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgid "|84a5c9b5041c43c3beab9786197c3e4e|" #~ msgstr "" -#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgid "|b5c4be0b52d4493ba8c4af14d7c2db97|" #~ msgstr "" -#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgid "|c1c784183d18481186ff65dc261d1335|" +#~ msgstr "" + +#~ msgid "|669fcd1f44ab42f5bbd196c3cf1ecbc2|" +#~ msgstr "" + +#~ msgid "|edfb08758c9441afb6736045a59e154c|" +#~ msgstr "" + +#~ msgid "|82338b8bbad24d5ea9df3801aab37852|" +#~ msgstr "" + +#~ msgid "|518d994dd2c844898b441da03b858326|" +#~ msgstr "" + +#~ msgid "|7bfcfcb57ae5403f8e18486f45ca48b4|" #~ msgstr "" diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index a1598faa0ee4..3b56d02c7503 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-10-10 
00:29+0000\n" +"POT-Creation-Date: 2024-11-30 00:31+0000\n" "PO-Revision-Date: 2024-06-12 10:09+0000\n" "Last-Translator: Yan Gao \n" "Language: zh_Hans\n" @@ -971,9 +971,9 @@ msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版 #: ../../source/contributor-how-to-release-flower.rst:13 #, fuzzy msgid "" -"Run ``python3 src/py/flwr_tool/update_changelog.py `` in " -"order to add every new change to the changelog (feel free to make manual " -"changes to the changelog afterwards until it looks good)." +"Run ``python3 ./dev/update_changelog.py `` in order to add" +" every new change to the changelog (feel free to make manual changes to " +"the changelog afterwards until it looks good)." msgstr "" "运行 ``python3 src/py/flwr_tool/update_changelog.py `` " "以将每项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为止)。" @@ -1335,10 +1335,10 @@ msgid "Where to start" msgstr "从哪里开始" #: ../../source/contributor-ref-good-first-contributions.rst:11 +#, fuzzy msgid "" -"Until the Flower core library matures it will be easier to get PR's " -"accepted if they only touch non-core areas of the codebase. Good " -"candidates to get started are:" +"In general, it is easier to get PR's accepted if they only touch non-core" +" areas of the codebase. Good candidates to get started are:" msgstr "在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接受。可以从以下方面入手:" #: ../../source/contributor-ref-good-first-contributions.rst:14 @@ -1346,113 +1346,128 @@ msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "文档: 缺少什么?哪些内容可以表达得更清楚?" #: ../../source/contributor-ref-good-first-contributions.rst:15 +#, python-format +msgid "" +"Open issues: Issues with the tag `good first issue " +"`_." +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:17 msgid "Baselines: See below." msgstr "Baselines: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:16 +#: ../../source/contributor-ref-good-first-contributions.rst:18 msgid "Examples: See below." 
msgstr "示例: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:19 -msgid "Request for Flower Baselines" -msgstr "Flower Baselines的申请" - #: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy +msgid "Flower Baselines" +msgstr "**更新 Flower Baselines**" + +#: ../../source/contributor-ref-good-first-contributions.rst:23 +#, fuzzy msgid "" -"If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines " -"`_." +"If you are not familiar with Flower Baselines, please check our " +"`contributing guide for baselines `_." msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:26 #, fuzzy msgid "" -"You should then check out the open `issues " +"Then take a look at the open `issues " "`_" -" for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignees, feel free to assign it to yourself and start " -"working on it!" +" for baseline requests. If you find a baseline that you'd like to work " +"on, and it has no assignees, feel free to assign it to yourself and get " +"started!" msgstr "" "然后查看开放的 `issues " "`_" " baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" -#: ../../source/contributor-ref-good-first-contributions.rst:30 +#: ../../source/contributor-ref-good-first-contributions.rst:31 +#, fuzzy msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to" -" open a new issue with the baseline request template!" +"If you don't find the baseline you'd like to work on, be sure to open a " +"new issue with the baseline request template!" msgstr "如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题(GitHub issue)!" 
-#: ../../source/contributor-ref-good-first-contributions.rst:34 -msgid "Request for examples" -msgstr "示例请求" +#: ../../source/contributor-ref-good-first-contributions.rst:35 +#, fuzzy +msgid "Usage examples" +msgstr "实例" -#: ../../source/contributor-ref-good-first-contributions.rst:36 +#: ../../source/contributor-ref-good-first-contributions.rst:37 +#, fuzzy msgid "" -"We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are" -" a few ideas where we'd be happy to accept a PR:" +"We wish we had more time to write usage examples because they help users " +"to get started with building what they want. If you notice any missing " +"examples that could help others, feel free to contribute!" msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建他们想要的东西。以下是我们乐意接受 PR 的几个想法:" -#: ../../source/contributor-ref-good-first-contributions.rst:40 -msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -msgstr "微调 Llama 2,使用 Hugging Face Transformers 和 PyTorch" - -#: ../../source/contributor-ref-good-first-contributions.rst:41 -msgid "XGBoost" -msgstr "XGBoost" - -#: ../../source/contributor-ref-good-first-contributions.rst:42 -msgid "Android ONNX on-device training" -msgstr "安卓 ONNX 设备上训练" - #: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 msgid "Secure Aggregation Protocols" msgstr "安全聚合协议" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:6 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " -"protocol has not been implemented yet, so its diagram and abstraction may" -" not be accurate in practice. The SecAgg protocol can be considered as a " -"special case of the SecAgg+ protocol." 
+"While this term might be used in other places, here it refers to a series" +" of protocols, including ``SecAgg``, ``SecAgg+``, ``LightSecAgg``, " +"``FastSecAgg``, etc. This concept was first proposed by Bonawitz et al. " +"in `Practical Secure Aggregation for Federated Learning on User-Held Data" +" `_." msgstr "" -"包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg " -"协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" - -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 -#, fuzzy -msgid "The ``SecAgg+`` abstraction" -msgstr "代码:`SecAgg+` 抽象" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" -"In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have" -" keys of int type rather than ClientProxy type." +"Secure Aggregation protocols are used to securely aggregate model updates" +" from multiple clients while keeping the updates private. This is done by" +" encrypting the model updates before sending them to the server. The " +"server can decrypt only the aggregated model update without being able to" +" inspect individual updates." msgstr "" -"在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是 int 类型,而不是 " -"ClientProxy 类型。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:16 msgid "" -"The Flower server will execute and process received results in the " -"following order:" -msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" +"Flower now provides the ``SecAgg`` and ``SecAgg+`` protocols. While we " +"plan to implement more protocols in the future, one may also implement " +"their own custom secure aggregation protocol via low-level APIs." 
+msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 -#, fuzzy -msgid "The ``LightSecAgg`` abstraction" -msgstr "代码:`LightSecAgg` 抽象" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:21 +msgid "The ``SecAgg+`` protocol in Flower" +msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 -msgid "Types" -msgstr "类型" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:23 +msgid "" +"The ``SecAgg+`` protocol is implemented using the ``SecAggPlusWorkflow`` " +"in the ``ServerApp`` and the ``secaggplus_mod`` in the ``ClientApp``. The" +" ``SecAgg`` protocol is a special case of the ``SecAgg+`` protocol, and " +"one may use ``SecAggWorkflow`` and ``secagg_mod`` for that." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:28 +msgid "" +"You may find a detailed example in the `Secure Aggregation Example " +"`_. The " +"documentation for the ``SecAgg+`` protocol configuration is available at " +"`SecAggPlusWorkflow `_." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:33 +msgid "" +"The logic of the ``SecAgg+`` protocol is illustrated in the following " +"sequence diagram: the dashed lines represent communication over the " +"network, and the solid lines represent communication within the same " +"process. The ``ServerApp`` is connected to ``SuperLink``, and the " +"``ClientApp`` is connected to the ``SuperNode``; thus, the communication " +"between the ``ServerApp`` and the ``ClientApp`` is done via the " +"``SuperLink`` and the ``SuperNode``." 
+msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:2 msgid "Contribute on GitHub" @@ -2086,7 +2101,6 @@ msgstr "" "contributions.html>`_,在这里你应该特别看看 :code:`baselines` 的贡献。" #: ../../source/contributor-tutorial-contribute-on-github.rst:357 -#: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "附录" @@ -2184,7 +2198,6 @@ msgid "Get started as a contributor" msgstr "成为贡献者" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/docker/run-as-subprocess.rst:11 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 @@ -2464,18 +2477,11 @@ msgstr "启用 SSL 连接" #: ../../source/docker/enable-tls.rst:4 msgid "" "When operating in a production environment, it is strongly recommended to" -" enable Transport Layer Security (TLS) for each Flower Component to " +" enable Transport Layer Security (TLS) for each Flower component to " "ensure secure communication." msgstr "" -#: ../../source/docker/enable-tls.rst:7 -#, fuzzy -msgid "" -"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" - -#: ../../source/docker/enable-tls.rst:12 +#: ../../source/docker/enable-tls.rst:9 #, fuzzy msgid "" "For testing purposes, you can generate your own self-signed certificates." @@ -2486,78 +2492,107 @@ msgstr "" "出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" -#: ../../source/docker/enable-tls.rst:17 +#: ../../source/docker/enable-tls.rst:16 msgid "" "Because Flower containers, by default, run with a non-root user ``app``, " "the mounted files and directories must have the proper permissions for " "the user ID ``49999``." 
msgstr "" -#: ../../source/docker/enable-tls.rst:20 +#: ../../source/docker/enable-tls.rst:19 msgid "" "For example, to change the user ID of all files in the ``certificates/`` " "directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -#: ../../source/docker/enable-tls.rst:23 -#: ../../source/docker/persist-superlink-state.rst:15 +#: ../../source/docker/enable-tls.rst:22 msgid "" "If you later want to delete the directory, you can change the user ID " "back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " -"state``." +"certificates``." +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "Isolation Mode ``subprocess``" +msgstr "" + +#: ../../source/docker/enable-tls.rst:29 +msgid "" +"By default, the ServerApp is executed as a subprocess within the " +"SuperLink Docker container, and the ClientApp is run as a subprocess " +"within the SuperNode Docker container. You can learn more about the " +"different process modes here: :doc:`run-as-subprocess`." msgstr "" -#: ../../source/docker/enable-tls.rst:27 +#: ../../source/docker/enable-tls.rst:34 ../../source/docker/enable-tls.rst:119 +#, fuzzy +msgid "" +"To enable TLS between the SuperLink and SuperNode, as well as between the" +" SuperLink and the ``flwr`` CLI, you will need a PEM-encoded root " +"certificate, private key, and certificate chain." 
+msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" + +#: ../../source/docker/enable-tls.rst:37 #, fuzzy -msgid "SuperLink" +msgid "**SuperLink**" msgstr "flower-superlink" -#: ../../source/docker/enable-tls.rst:29 +#: ../../source/docker/enable-tls.rst:39 +#, fuzzy msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container:" +"Assuming all files we need are in the local ``superlink-certificates`` " +"directory, we can use the flag ``--volume`` to mount the local " +"directories into the SuperLink container:" msgstr "" +"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " +"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" #: ../../source/docker/enable-tls.rst #, fuzzy msgid "Understanding the command" msgstr "训练模型" -#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 -#: ../../source/docker/enable-tls.rst:125 -#: ../../source/docker/tutorial-quickstart-docker.rst:66 -#: ../../source/docker/tutorial-quickstart-docker.rst:103 -#: ../../source/docker/tutorial-quickstart-docker.rst:217 -#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#: ../../source/docker/enable-tls.rst:54 ../../source/docker/enable-tls.rst:96 +#: ../../source/docker/enable-tls.rst:140 +#: ../../source/docker/enable-tls.rst:179 +#: ../../source/docker/enable-tls.rst:206 +#: ../../source/docker/enable-tls.rst:231 +#: ../../source/docker/tutorial-quickstart-docker.rst:68 +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +#: ../../source/docker/tutorial-quickstart-docker.rst:221 +#: ../../source/docker/tutorial-quickstart-docker.rst:303 #, fuzzy msgid "``docker run``: This tells Docker to run a container from an image." 
msgstr "`docker run``: 这是运行新 Docker 容器的命令。" -#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 -#: ../../source/docker/enable-tls.rst:126 -#: ../../source/docker/tutorial-quickstart-docker.rst:67 -#: ../../source/docker/tutorial-quickstart-docker.rst:104 -#: ../../source/docker/tutorial-quickstart-docker.rst:218 -#: ../../source/docker/tutorial-quickstart-docker.rst:306 +#: ../../source/docker/enable-tls.rst:55 ../../source/docker/enable-tls.rst:97 +#: ../../source/docker/enable-tls.rst:141 +#: ../../source/docker/enable-tls.rst:180 +#: ../../source/docker/enable-tls.rst:207 +#: ../../source/docker/enable-tls.rst:232 +#: ../../source/docker/tutorial-quickstart-docker.rst:69 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:222 +#: ../../source/docker/tutorial-quickstart-docker.rst:304 msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"``--volume ./certificates/:/app/certificates/:ro``: Mount the " -"``certificates`` directory in" +"``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the " +"``superlink-certificates``" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"the current working directory of the host machine as a read-only volume " -"at the" +"directory in the current working directory of the host machine as a read-" +"only volume" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "``/app/certificates`` directory inside the container." -msgstr "" +#, fuzzy +msgid "at the ``/app/certificates`` directory inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" #: ../../source/docker/enable-tls.rst msgid "" @@ -2569,17 +2604,8 @@ msgstr "" msgid "directory." 
msgstr "" -#: ../../source/docker/enable-tls.rst -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " -"the image to be run and the specific" -msgstr "" - -#: ../../source/docker/enable-tls.rst -msgid "" -"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " -"represents a specific version of the image." +#: ../../source/docker/enable-tls.rst:62 +msgid "````: The name of your SuperLink image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2644,22 +2670,12 @@ msgstr "" msgid "the network." msgstr "" -#: ../../source/docker/enable-tls.rst:72 +#: ../../source/docker/enable-tls.rst:79 #, fuzzy -msgid "SuperNode" +msgid "**SuperNode**" msgstr "flower-superlink" -#: ../../source/docker/enable-tls.rst:74 -#, fuzzy -msgid "" -"Assuming that the ``ca.crt`` certificate already exists locally, we can " -"use the flag ``--volume`` to mount the local certificate into the " -"container's ``/app/`` directory." 
-msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" - -#: ../../source/docker/enable-tls.rst:79 +#: ../../source/docker/enable-tls.rst:83 ../../source/docker/enable-tls.rst:189 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2667,24 +2683,24 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt``" msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"current working directory of the host machine as a read-only volume at " -"the ``/app/ca.crt``" +"file from the ``superlink-certificates`` directory of the host machine as" +" a read-only" msgstr "" #: ../../source/docker/enable-tls.rst #, fuzzy -msgid "directory inside the container." +msgid "volume at the ``/app/ca.crt`` directory inside the container." msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/docker/enable-tls.rst -msgid "" -":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " -"the image to be run and the specific" +#: ../../source/docker/enable-tls.rst:101 +msgid "````: The name of your SuperNode image to be run." msgstr "" #: ../../source/docker/enable-tls.rst @@ -2697,60 +2713,197 @@ msgstr "" msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -#: ../../source/docker/enable-tls.rst:105 -msgid "SuperExec" +#: ../../source/docker/enable-tls.rst +msgid "Isolation Mode ``process``" +msgstr "" + +#: ../../source/docker/enable-tls.rst:109 +msgid "" +"In isolation mode ``process``, the ServerApp and ClientApp run in their " +"own processes. 
Unlike in isolation mode ``subprocess``, the SuperLink or " +"SuperNode does not attempt to create the respective processes; instead, " +"they must be created externally." +msgstr "" + +#: ../../source/docker/enable-tls.rst:113 +msgid "" +"It is possible to run only the SuperLink in isolation mode ``subprocess``" +" and the SuperNode in isolation mode ``process``, or vice versa, or even " +"both with isolation mode ``process``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:117 +msgid "**SuperLink and ServerApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:122 +#, fuzzy +msgid "" +"Assuming all files we need are in the local ``superlink-certificates`` " +"directory, we can use the flag ``--volume`` to mount the local directory " +"into the SuperLink container:" +msgstr "" +"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " +"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory in the current working directory of " +"the host" msgstr "" -#: ../../source/docker/enable-tls.rst:107 +#: ../../source/docker/enable-tls.rst msgid "" -"Assuming all files we need are in the local ``certificates`` directory " -"where the SuperExec will be executed from, we can use the flag " -"``--volume`` to mount the local directory into the ``/app/certificates/``" -" directory of the container:" +"machine as a read-only volume at the ``/app/certificates`` directory " +"inside the container." 
msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " "the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "SuperExec." +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-certfile certificates/server.pem``: Specify the location of the " -"SuperExec's" +"``--isolation process``: Tells the SuperLink that the ServerApp is " +"created by separate" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "independent process. The SuperLink does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:168 +#: ../../source/docker/tutorial-quickstart-docker.rst:207 +#, fuzzy +msgid "Start the ServerApp container:" +msgstr "启动服务器" + +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" + +#: ../../source/docker/enable-tls.rst:181 +msgid "````: The name of your ServerApp image to be run." msgstr "" #: ../../source/docker/enable-tls.rst msgid "" -"The ``certificates/server.pem`` file is used to identify the SuperExec " -"and to encrypt the" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--ssl-keyfile certificates/server.key``: Specify the location of the " -"SuperExec's" +"unencrypted communication. Secure connections will be added in future " +"releases." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:185 +msgid "**SuperNode and ClientApp**" +msgstr "" + +#: ../../source/docker/enable-tls.rst:192 +#, fuzzy +msgid "Start the SuperNode container:" +msgstr "启动服务器" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the " +"``ca.crt`` file from the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "" +"``superlink-certificates`` directory of the host machine as a read-only " +"volume at the ``/app/ca.crt``" msgstr "" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" + #: ../../source/docker/enable-tls.rst msgid "" -"``--executor-config root-" -"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" #: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"location of the CA certificate file inside the container that the " -"SuperExec executor" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" #: ../../source/docker/enable-tls.rst -msgid "should use to verify the SuperLink's identity." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" + +#: ../../source/docker/enable-tls.rst:220 +#, fuzzy +msgid "Start the ClientApp container:" +msgstr "使用虚拟客户端引擎" + +#: ../../source/docker/enable-tls.rst:233 +msgid "````: The name of your ClientApp image to be run." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:237 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:54 +#, fuzzy +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" + +#: ../../source/docker/enable-tls.rst:239 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker.rst:330 +#, fuzzy +msgid "pyproject.toml" +msgstr "或 ``pyproject.toml```:" + +#: ../../source/docker/enable-tls.rst:246 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "" +"The path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/enable-tls.rst:251 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/enable-tls.rst:254 +msgid "" +"In this example, ``local-deployment-tls`` has been used. Just remember to" +" replace ``local-deployment-tls`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" #: ../../source/docker/index.rst:2 @@ -2818,6 +2971,13 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." 
+msgstr "" + #: ../../source/docker/persist-superlink-state.rst:21 #, fuzzy msgid "" @@ -2929,46 +3089,133 @@ msgstr "创建超级节点 Dockerfile" #: ../../source/docker/run-as-subprocess.rst:2 #, fuzzy -msgid "Run ClientApp as a Subprocess" +msgid "Run ServerApp or ClientApp as a Subprocess" msgstr "运行分类器和测试" #: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"In this mode, the ClientApp is executed as a subprocess within the " -"SuperNode Docker container, rather than running in a separate container. " -"This approach reduces the number of running containers, which can be " -"beneficial for environments with limited resources. However, it also " -"means that the ClientApp is no longer isolated from the SuperNode, which " -"may introduce additional security concerns." +"The SuperLink and SuperNode components support two distinct isolation " +"modes, allowing for flexible deployment and control:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:13 +#: ../../source/docker/run-as-subprocess.rst:7 msgid "" -"Before running the ClientApp as a subprocess, ensure that the FAB " -"dependencies have been installed in the SuperNode images. This can be " -"done by extending the SuperNode image:" +"Subprocess Mode: In this configuration (default), the SuperLink and " +"SuperNode take responsibility for launching the ServerApp and ClientApp " +"processes internally. This differs from the ``process`` isolation-mode " +"which uses separate containers, as demonstrated in the :doc:`tutorial-" +"quickstart-docker` guide." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:12 +msgid "" +"Using the ``subprocess`` approach reduces the number of running " +"containers, which can be beneficial for environments with limited " +"resources. However, it also means that the applications are not isolated " +"from their parent containers, which may introduce additional security " +"concerns." 
msgstr "" #: ../../source/docker/run-as-subprocess.rst:17 +msgid "" +"Process Mode: In this mode, the ServerApp and ClientApps run in " +"completely separate processes. Unlike the alternative Subprocess mode, " +"the SuperLink or SuperNode does not attempt to create or manage these " +"processes. Instead, they must be started externally." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:22 +msgid "" +"Both modes can be mixed for added flexibility. For instance, you can run " +"the SuperLink in ``subprocess`` mode while keeping the SuperNode in " +"``process`` mode, or vice versa." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:25 +msgid "" +"To run the SuperLink and SuperNode in isolation mode ``process``, refer " +"to the :doc:`tutorial-quickstart-docker` guide. To run them in " +"``subprocess`` mode, follow the instructions below." +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 #, fuzzy -msgid "Dockerfile.supernode" -msgstr "Flower 服务器" +msgid "ServerApp" +msgstr "服务器" + +#: ../../source/docker/run-as-subprocess.rst:33 +#: ../../source/docker/run-as-subprocess.rst:74 +#, fuzzy +msgid "**Prerequisites**" +msgstr "先决条件" + +#: ../../source/docker/run-as-subprocess.rst:35 +msgid "" +"1. Before running the ServerApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperLink images. This can be " +"done by extending the SuperLink image:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:38 +#, fuzzy +msgid "superlink.Dockerfile" +msgstr "创建超级节点 Dockerfile" -#: ../../source/docker/run-as-subprocess.rst:31 +#: ../../source/docker/run-as-subprocess.rst:52 #, fuzzy msgid "" -"Next, build the SuperNode Docker image by running the following command " -"in the directory where Dockerfile is located:" +"2. 
Next, build the SuperLink Docker image by running the following " +"command in the directory where Dockerfile is located:" msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" -#: ../../source/docker/run-as-subprocess.rst:39 -msgid "Run the ClientApp as a Subprocess" +#: ../../source/docker/run-as-subprocess.rst:59 +#, fuzzy +msgid "**Run the ServerApp as a Subprocess**" +msgstr "运行分类器和测试" + +#: ../../source/docker/run-as-subprocess.rst:61 +msgid "" +"Start the SuperLink and run the ServerApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#, fuzzy +msgid "ClientApp" +msgstr "客户端" + +#: ../../source/docker/run-as-subprocess.rst:76 +msgid "" +"1. Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:41 +#: ../../source/docker/run-as-subprocess.rst:80 +#, fuzzy +msgid "supernode.Dockerfile" +msgstr "创建超级节点 Dockerfile" + +#: ../../source/docker/run-as-subprocess.rst:94 +#, fuzzy +msgid "" +"2. 
Next, build the SuperNode Docker image by running the following " +"command in the directory where Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" + +#: ../../source/docker/run-as-subprocess.rst:101 +#, fuzzy +msgid "**Run the ClientApp as a Subprocess**" +msgstr "运行分类器和测试" + +#: ../../source/docker/run-as-subprocess.rst:103 msgid "" -"Start the SuperNode with the flag ``--isolation subprocess``, which tells" -" the SuperNode to execute the ClientApp as a subprocess:" +"Start the SuperNode and run the ClientApp as a subprocess (note that the " +"subprocess mode is the default, so you do not have to explicitly set the " +"``--isolation`` flag):" msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 @@ -3016,7 +3263,9 @@ msgstr "验证 Docker 守护进程是否正在运行。" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 -msgid "Docker Compose is `installed `_." +msgid "" +"Docker Compose V2 is `installed " +"`_." msgstr "" #: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 @@ -3037,32 +3286,14 @@ msgid "" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:45 #, fuzzy -msgid "Build and start the services using the following command:" +msgid "" +"Export the version of Flower that your environment uses. 
Then, build and " +"start the services using the following command:" msgstr "运行以下命令激活 virtualenv:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 -#, fuzzy -msgid "" -"Append the following lines to the end of the ``pyproject.toml`` file and " -"save it:" -msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 -#: ../../source/docker/tutorial-quickstart-docker.rst:324 -#, fuzzy -msgid "pyproject.toml" -msgstr "或 ``pyproject.toml```:" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 -msgid "" -"You can customize the string that follows ``tool.flwr.federations.`` to " -"fit your needs. However, please note that the string cannot contain a dot" -" (``.``)." -msgstr "" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -3070,77 +3301,78 @@ msgid "" "command." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 -#, fuzzy -msgid "Run the example:" -msgstr "将示例联邦化" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 -msgid "Follow the logs of the SuperExec service:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:72 +msgid "Run the example and follow the logs of the ``ServerApp`` :" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:78 msgid "" "That is all it takes! You can monitor the progress of the run through the" -" logs of the SuperExec." +" logs of the ``ServerApp``." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:91 msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:94 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 #, fuzzy msgid "Limitations" msgstr "运行模拟" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 #, fuzzy msgid "Quickstart Example" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy msgid "quickstart-fastai" msgstr "快速入门 fastai" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 -#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:123 -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/ref-changelog.md:236 ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:879 ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:1001 ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1132 msgid "None" msgstr "无" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy msgid "quickstart-huggingface" msgstr "快速入门教程" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy msgid "quickstart-jax" msgstr "快速入门 JAX" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "快速入门 JAX" + #: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 #, fuzzy msgid "" "The example has not yet been updated to work with the latest ``flwr`` " @@ -3149,65 +3381,56 @@ msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以 #: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy -msgid "quickstart-mlcube" -msgstr "快速入门 JAX" - -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 -#, fuzzy msgid "quickstart-mlx" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:110 msgid "" "`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 #, fuzzy msgid "quickstart-monai" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy msgid "quickstart-pandas" msgstr "快速入门Pandas" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 #, fuzzy msgid "quickstart-pytorch-lightning" msgstr "快速入门 PyTorch Lightning" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy msgid "quickstart-pytorch" msgstr "PyTorch快速入门" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy msgid "quickstart-sklearn-tabular" msgstr "scikit-learn快速入门" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 #, fuzzy msgid "quickstart-tabnet" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy msgid "quickstart-tensorflow" msgstr "快速入门 TensorFlow" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 -msgid "Only runs on AMD64." 
-msgstr "" - #: ../../source/docker/set-environment-variables.rst:2 #, fuzzy msgid "Set Environment Variables" @@ -3237,8 +3460,8 @@ msgid "" "You will learn how to run the Flower client and server components on two " "separate machines, with Flower configured to use TLS encryption and " "persist SuperLink state across restarts. A server consists of a SuperLink" -" and ``SuperExec``. For more details about the Flower architecture, refer" -" to the :doc:`../explanation-flower-architecture` explainer page." +" and a ``ServerApp``. For more details about the Flower architecture, " +"refer to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 @@ -3293,134 +3516,144 @@ msgstr "" msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:47 msgid "" "Use the ``certs.yml`` Compose file to generate your own self-signed " "certificates. If you have certificates, you can continue with Step 2." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:212 msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:54 msgid "" "For production environments, you may have to use dedicated services to " "obtain your certificates." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:57 msgid "" -"First, set the environment variables ``SUPERLINK_IP`` and " -"``SUPEREXEC_IP`` with the IP address from the remote machine. For " -"example, if the IP is ``192.168.2.33``, execute:" +"First, set the environment variable ``SUPERLINK_IP`` with the IP address " +"from the remote machine. For example, if the IP is ``192.168.2.33``, " +"execute:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:64 msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:71 msgid "Step 2: Copy the Server Compose Files" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:73 msgid "" "Use the method that works best for you to copy the ``server`` directory, " -"the certificates, and your Flower project to the remote machine." +"the certificates, and the ``pyproject.toml`` file of your Flower project " +"to the remote machine." 
msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:86 #, fuzzy msgid "Step 3: Start the Flower Server Components" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:88 msgid "" "Log into the remote machine using ``ssh`` and run the following command " -"to start the SuperLink and SuperExec services:" +"to start the SuperLink and ``ServerApp`` services:" msgstr "" #: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``server`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the server ``compose.yml`` file." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:107 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions. After exporting the ``PROJECT_DIR`` (after line " +"4), run the following commands:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:116 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:165 +msgid "" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:118 msgid "Go back to your terminal on your local machine." 
msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:121 #, fuzzy msgid "Step 4: Start the Flower Client Components" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:123 msgid "" "On your local machine, run the following command to start the client " "components:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:133 msgid "" -"The Path of the ``PROJECT_DIR`` should be relative to the location of the" -" ``client`` Docker Compose files." +"The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file " +"should be relative to the location of the client ``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:137 #, fuzzy msgid "Step 5: Run Your Flower Project" msgstr "Flower 服务器。" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"Specify the remote SuperExec IP addresses and the path to the root " -"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " -"the ``pyproject.toml`` file. Here, we have named our remote federation " -"``remote-superexec``:" +"Specify the remote SuperLink IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-deployment]`` table in" +" the ``pyproject.toml`` file. 
Here, we have named our remote federation " +"``remote-deployment``:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:143 #, fuzzy msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "scikit-learn快速入门" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 -msgid "" -"The Path of the ``root-certificates`` should be relative to the location " -"of the ``pyproject.toml`` file." -msgstr "" - -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 -msgid "To run the project, execute:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:155 +msgid "Run the project and follow the ``ServerApp`` logs:" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 msgid "" "That's it! With these steps, you've set up Flower on two separate " "machines and are ready to start using it." msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:165 msgid "Step 6: Clean Up" msgstr "" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:167 #, fuzzy msgid "Shut down the Flower client components:" msgstr "Flower 客户端。" -#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:174 msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" @@ -3442,16 +3675,16 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:33 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:39 +#: ../../source/docker/tutorial-quickstart-docker.rst:38 msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:45 +#: ../../source/docker/tutorial-quickstart-docker.rst:44 msgid "" "User-defined networks, such as ``flwr-network``, enable IP resolution of " "container names, a feature absent in the default bridge network. This " @@ -3459,53 +3692,56 @@ msgid "" "first." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#: ../../source/docker/tutorial-quickstart-docker.rst:49 #, fuzzy msgid "Step 2: Start the SuperLink" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 -#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:64 +#: ../../source/docker/tutorial-quickstart-docker.rst:51 #, fuzzy msgid "Open your terminal and run:" msgstr "打开另一台终端,启动第二个客户端:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "Understand the command" +msgid "" +"``-p 9091:9091 -p 9092:9092 -p 9093:9093``: Map port ``9091``, ``9092`` " +"and ``9093`` of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " -"container to the same port of" +"container to the same port of the host machine, allowing other services " +"to access the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "the host machine, allowing other services to access the Driver API on" +msgid "" +"ServerAppIO API on 
``http://localhost:9091``, the Fleet API on " +"``http://localhost:9092`` and" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgid "the Exec API on ``http://localhost:9093``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:71 -#: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:219 -#: ../../source/docker/tutorial-quickstart-docker.rst:309 +#: ../../source/docker/tutorial-quickstart-docker.rst:74 +#: ../../source/docker/tutorial-quickstart-docker.rst:114 +#: ../../source/docker/tutorial-quickstart-docker.rst:223 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:72 +#: ../../source/docker/tutorial-quickstart-docker.rst:75 msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:73 -#: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:220 -#: ../../source/docker/tutorial-quickstart-docker.rst:311 +#: ../../source/docker/tutorial-quickstart-docker.rst:76 +#: ../../source/docker/tutorial-quickstart-docker.rst:116 +#: ../../source/docker/tutorial-quickstart-docker.rst:225 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3527,16 +3763,26 @@ msgstr "" msgid "unencrypted communication." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:80 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"independent process. The SuperLink does not attempt to create it. 
You can" +" learn more about" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the different process modes here: :doc:`run-as-subprocess`." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:86 #, fuzzy -msgid "Step 3: Start the SuperNode" +msgid "Step 3: Start the SuperNodes" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker.rst:82 +#: ../../source/docker/tutorial-quickstart-docker.rst:88 msgid "Start two SuperNode containers." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:84 +#: ../../source/docker/tutorial-quickstart-docker.rst:90 msgid "Start the first container:" msgstr "" @@ -3552,18 +3798,18 @@ msgstr "" msgid "``http://localhost:9094``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:109 +#: ../../source/docker/tutorial-quickstart-docker.rst:115 msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " -"to be run and the specific tag" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: This is the " +"name of the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "of the image." +msgid "image to be run and the specific tag of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst @@ -3588,51 +3834,54 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--supernode-address 0.0.0.0:9094``: Set the address and port number " -"that the SuperNode" +"``--clientappio-api-address 0.0.0.0:9094``: Set the address and port " +"number that the" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "is listening on." +msgid "SuperNode is listening on to communicate with the ClientApp. 
If" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--isolation process``: Tells the SuperNode that the ClientApp is " -"created by separate" +"two SuperNodes are started on the same machine, set two different port " +"numbers for each SuperNode." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "independent process. The SuperNode does not attempt to create it." +msgid "" +"(E.g. In the next step, we set the second SuperNode container to listen " +"on port 9095)" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#: ../../source/docker/tutorial-quickstart-docker.rst:132 #, fuzzy msgid "Start the second container:" msgstr "启动服务器" -#: ../../source/docker/tutorial-quickstart-docker.rst:142 -msgid "Step 4: Start the ClientApp" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#, fuzzy +msgid "Step 4: Start a ServerApp" +msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker.rst:144 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 msgid "" -"The ClientApp Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own ClientApp image. In order to " +"The ServerApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ServerApp image. In order to " "install the FAB dependencies, you will need to create a Dockerfile that " -"extends the ClientApp image and installs the required dependencies." +"extends the ServerApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:149 +#: ../../source/docker/tutorial-quickstart-docker.rst:157 msgid "" -"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " -"the following code into it:" +"Create a ServerApp Dockerfile called ``serverapp.Dockerfile`` and paste " +"the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#: ../../source/docker/tutorial-quickstart-docker.rst:160 #, fuzzy -msgid "Dockerfile.clientapp" -msgstr "Flower 客户端。" +msgid "serverapp.Dockerfile" +msgstr "创建超级节点 Dockerfile" #: ../../source/docker/tutorial-quickstart-docker.rst #, fuzzy @@ -3641,13 +3890,13 @@ msgstr "创建超级节点 Dockerfile" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/serverapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"to be built from is the ``flwr/clientapp image``, version :substitution-" +"to be built from is the ``flwr/serverapp`` image, version :substitution-" "code:`|stable_flwr_version|`." msgstr "" @@ -3706,7 +3955,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"``ENTRYPOINT [\"flwr-serverapp\"]``: Set the command ``flwr-serverapp`` " "to be" msgstr "" @@ -3714,7 +3963,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:186 +#: ../../source/docker/tutorial-quickstart-docker.rst:194 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3723,217 +3972,211 @@ msgid "" "after it has been copied into the Docker image (see line 5)." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:192 -#, fuzzy -msgid "" -"Next, build the ClientApp Docker image by running the following command " -"in the directory where the Dockerfile is located:" -msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" - -#: ../../source/docker/tutorial-quickstart-docker.rst:201 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst:200 msgid "" -"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " -"Remember that these values are merely examples, and you can customize " -"them according to your requirements." -msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the ServerApp image:" +msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:205 -#, fuzzy -msgid "Start the first ClientApp container:" -msgstr "使用虚拟客户端引擎" +#: ../../source/docker/tutorial-quickstart-docker.rst:224 +msgid "``--name serverapp``: Assign the name ``serverapp`` to the container." +msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst #, fuzzy msgid "" -"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +"``flwr_serverapp:0.0.1``: This is the name of the image to be run and the" " specific tag" -msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" +msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "" -"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" -" the address" +msgid "of the image." msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``supernode-1:9094``." 
+msgid "" +"``--serverappio-api-address superlink:9091``: Connect to the SuperLink's " +"ServerAppIO API" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:226 -msgid "Start the second ClientApp container:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "at the address ``superlink:9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#: ../../source/docker/tutorial-quickstart-docker.rst:234 #, fuzzy -msgid "Step 5: Start the SuperExec" +msgid "Step 5: Start the ClientApp" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#: ../../source/docker/tutorial-quickstart-docker.rst:236 #, fuzzy msgid "" -"The procedure for building and running a SuperExec image is almost " -"identical to the ClientApp image." +"The procedure for building and running a ClientApp image is almost " +"identical to the ServerApp image." msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" -"Similar to the ClientApp image, you will need to create a Dockerfile that" -" extends the SuperExec image and installs the required FAB dependencies." +"Similar to the ServerApp image, you will need to create a Dockerfile that" +" extends the ClientApp image and installs the required FAB dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:245 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " -"the following code in:" +"Create a ClientApp Dockerfile called ``clientapp.Dockerfile`` and paste " +"the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:248 -msgid "Dockerfile.superexec" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:245 +#, fuzzy +msgid "clientapp.Dockerfile" +msgstr "客户端" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" " specifies that the Docker image" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"to be built from is the ``flwr/superexec image``, version :substitution-" +"to be built from is the ``flwr/clientapp`` image, version :substitution-" "code:`|stable_flwr_version|`." -msgstr "" +msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" -"superexec`` to be" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." 
+"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:283 +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +#, fuzzy msgid "" -"Afterward, in the directory that holds the Dockerfile, execute this " -"Docker command to build the SuperExec image:" -msgstr "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" -#: ../../source/docker/tutorial-quickstart-docker.rst:290 +#: ../../source/docker/tutorial-quickstart-docker.rst:286 #, fuzzy -msgid "Start the SuperExec container:" -msgstr "启动服务器" - -#: ../../source/docker/tutorial-quickstart-docker.rst -msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"the host machine, allowing you to access the SuperExec API on " -"``http://localhost:9093``." -msgstr "" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." +msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#: ../../source/docker/tutorial-quickstart-docker.rst:310 -msgid "``--name superexec``: Assign the name ``superexec`` to the container." 
-msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +#, fuzzy +msgid "Start the first ClientApp container:" +msgstr "使用虚拟客户端引擎" #: ../../source/docker/tutorial-quickstart-docker.rst #, fuzzy msgid "" -"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" " specific tag" -msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" +msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" #: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " -"SuperExec executor to" +"``--clientappio-api-address supernode-1:9094``: Connect to the " +"SuperNode's ClientAppIO" msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst -msgid "connect to the SuperLink running on port ``9091``." +msgid "API at the address ``supernode-1:9094``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:314 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:320 +#: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#: ../../source/docker/tutorial-quickstart-docker.rst:328 #, fuzzy msgid "Add the following lines to the ``pyproject.toml``:" msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/docker/tutorial-quickstart-docker.rst:331 -msgid "Run the ``quickstart-docker`` project by executing the command:" -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker.rst:337 -msgid "Follow the SuperExec logs to track the execution of the run:" +msgid "" +"Run the ``quickstart-docker`` project and follow the ServerApp logs to " +"track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:345 #, fuzzy msgid "Step 
7: Update the Application" msgstr "步骤 3:自定义序列化" -#: ../../source/docker/tutorial-quickstart-docker.rst:346 +#: ../../source/docker/tutorial-quickstart-docker.rst:347 msgid "" "Change the application code. For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#: ../../source/docker/tutorial-quickstart-docker.rst:350 #, fuzzy msgid "quickstart_docker/task.py" msgstr "快速入门Pandas" -#: ../../source/docker/tutorial-quickstart-docker.rst:356 +#: ../../source/docker/tutorial-quickstart-docker.rst:357 #, fuzzy -msgid "Stop the current ClientApp containers:" +msgid "Stop the current ServerApp and ClientApp containers:" msgstr "当前客户端属性。" -#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:361 +msgid "" +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:364 +msgid "If you haven’t made any changes, you can skip steps 2 through 4." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:370 #, fuzzy -msgid "Rebuild the FAB and ClientApp image:" +msgid "Rebuild ServerApp and ClientApp images:" msgstr "加载数据" -#: ../../source/docker/tutorial-quickstart-docker.rst:368 -msgid "Launch two new ClientApp containers based on the newly built image:" +#: ../../source/docker/tutorial-quickstart-docker.rst:377 +msgid "" +"Launch one new ServerApp and two new ClientApp containers based on the " +"newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:383 +#: ../../source/docker/tutorial-quickstart-docker.rst:402 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:390 +#: ../../source/docker/tutorial-quickstart-docker.rst:409 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:392 +#: ../../source/docker/tutorial-quickstart-docker.rst:411 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 -#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker.rst:423 #, fuzzy msgid "Where to Go Next" msgstr "从哪里开始" -#: ../../source/docker/tutorial-quickstart-docker.rst:406 +#: ../../source/docker/tutorial-quickstart-docker.rst:425 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:407 +#: ../../source/docker/tutorial-quickstart-docker.rst:426 msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:427 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -3960,177 +4203,162 @@ msgstr "" msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 +#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:39 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:46 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " -"SuperExec and SuperNode images correctly." +"``ServerApp`` and ``ClientApp`` images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 #, fuzzy msgid "Step 2: Run Flower in Insecure Mode" msgstr "Flower 服务器。" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:53 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:61 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:175 #, fuzzy msgid "``docker compose``: The Docker command to run the Docker Compose tool." 
msgstr "`docker run``: 这是运行新 Docker 容器的命令。" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 -msgid "" -"``-f compose.yml``: Specify the YAML file that contains the basic Flower " -"service definitions." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:181 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:182 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:77 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 msgid "" -"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" -" the SuperExec addresses in the ``pyproject.toml`` file." +"To ensure the ``flwr`` CLI connects to the SuperLink, you need to specify" +" the SuperLink addresses in the ``pyproject.toml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:85 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:225 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 -msgid "Execute the command to run the quickstart example:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 -msgid "Monitor the SuperExec logs and wait for the summary to appear:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:94 +msgid "" +"Run the quickstart example, monitor the ``ServerApp`` logs and wait for " +"the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:102 #, fuzzy msgid "Step 4: Update the Application" msgstr "步骤 3:自定义序列化" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 msgid "In the next step, change the application code." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:121 #, fuzzy msgid "Rebuild and restart the services." msgstr "我们已经可以启动*服务器*了:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 -msgid "" -"If you have modified the dependencies listed in your ``pyproject.toml`` " -"file, it is essential to rebuild images." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:128 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:130 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:136 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 -msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the ``ServerApp`` logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:155 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:157 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:162 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 -msgid "" -"For more information, consult the following page: :doc:`persist-" -"superlink-state`." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:167 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +msgid "Run the command:" msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 -msgid "Run the command:" +msgid "" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst @@ -4150,17 +4378,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:238 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:197 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -4168,121 +4396,106 @@ msgid "" "if the containers are stopped and started again." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:205 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:207 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 -msgid "Step 7: Add another SuperNode" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:245 +#, fuzzy +msgid "Step 7: Add another SuperNode and ClientApp" +msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 msgid "" -"You can add more SuperNodes and ClientApps by duplicating their " -"definitions in the ``compose.yml`` file." +"You can add more SuperNodes and ClientApps by uncommenting their " +"definitions in the ``compose.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 -msgid "" -"Just give each new SuperNode and ClientApp service a unique service name " -"like ``supernode-3``, ``clientapp-3``, etc." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:250 +msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 -msgid "In ``compose.yml``, add the following:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:302 +msgid "" +"If you also want to enable TLS for the new SuperNode, uncomment the " +"definition in the ``with-tls.yml`` file:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 -msgid "compose.yml" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 -msgid "" -"If you also want to enable TLS for the new SuperNodes, duplicate the " -"SuperNode definition for each new SuperNode service in the ``with-" -"tls.yml`` file." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 -msgid "" -"Make sure that the names of the services match with the one in the " -"``compose.yml`` file." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 -msgid "In ``with-tls.yml``, add the following:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:305 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:326 +#, fuzzy +msgid "Restart the services with:" +msgstr "启动服务器" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:335 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 -msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:340 +msgid "Comment out the lines 2-6 and uncomment the lines 7-13:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:342 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy msgid "Restart the services:" msgstr "启动服务器" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:376 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:378 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 #, fuzzy msgid "Remove all services and volumes:" msgstr "从 R 中删除所有项目。" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:402 #, fuzzy msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "快速入门 iOS" @@ -4314,443 +4527,6 @@ msgstr "" "1.9.0(稳定版)发布时推出(预计发布时间:5 " "月)。超级节点夜间镜像必须与同一天发布的相应超级链接和服务器应用程序夜间镜像配对。为确保版本同步,建议使用具体标签,例如``1.9.0.dev20240501``,而不是``nightly``。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 -#, fuzzy -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." 
-msgstr "" -"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " -"从集中式到联邦式 `_ 做少量改动。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "Centralized Training" -msgstr "集中式训练" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 -#, fuzzy -msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called ``cifar.py``, revised part is " -"shown below:" -msgstr "" -"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " -"的文件,修改部分如下所示:" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 -msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." -msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 -msgid "You can now run your machine learning workload:" -msgstr "现在,您可以运行您的机器学习工作了:" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 -#, fuzzy -msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." 
-msgstr "" -"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " -"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 -msgid "Federated Training" -msgstr "联邦培训" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 -#, fuzzy -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only ``get_parameters`` and ``set_parameters`` function " -"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" -" PyTorch - From Centralized To Federated `. first." -msgstr "" -"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " -":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " -"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 -#, fuzzy -msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -"``server.py`` keeps unchanged, we can start the server directly." -msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 -#, fuzzy -msgid "" -"Finally, we will revise our *client* logic by changing ``get_parameters``" -" and ``set_parameters`` in ``client.py``, we will exclude batch " -"normalization parameters from model parameter list when sending to or " -"receiving from the server." 
-msgstr "" -"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " -":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " -"normalization层的参数。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "现在,您可以打开另外两个终端窗口并运行程序" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 -msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 -#: ../../source/tutorial-quickstart-jax.rst:319 -msgid "Next Steps" -msgstr "下一步工作" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 -msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" -msgstr "" -"本示例的完整源代码可在 `_ " -"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " -"CIFAR-10 子集,或者增加客户端的数量。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "实例: PyTorch - 从集中式到联邦式" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. 
We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." -msgstr "" -"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " -"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." -msgstr "" -"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " -"`_。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 -#, fuzzy -msgid "" -"Let's create a new file called ``cifar.py`` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as ``torch`` and ``torchvision``) need to be " -"imported. You can see that we do not import any package for federated " -"learning. You can keep all these imports as they are even when we add the" -" federated learning components at a later point." -msgstr "" -"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " -"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " -":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 -#, fuzzy -msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in ``class Net()``." 
-msgstr "" -"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " -"中定义。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 -#, fuzzy -msgid "" -"The ``load_data()`` function loads the CIFAR-10 training and test sets. " -"The ``transform`` normalized the data after loading." -msgstr "" -":code:`load_data()` 函数加载 CIFAR-10 " -"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 -#, fuzzy -msgid "" -"We now need to define the training (function ``train()``) which loops " -"over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 -#, fuzzy -msgid "" -"The evaluation of the model is defined in the function ``test()``. The " -"function loops over all test samples and measures the loss of the model " -"based on the test dataset." -msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." -msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." 
-msgstr "" -"到目前为止,如果你以前用过 " -"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 -msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 -#, fuzzy -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in ``cifar.py`` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." 
-msgstr "" -"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 -#: ../../source/tutorial-quickstart-jax.rst:147 -#, fuzzy -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -"``server.py`` first. The *server* needs to import the Flower package " -"``flwr``. Next, we use the ``start_server`` function to start a server " -"and tell it to perform three rounds of federated learning." -msgstr "" -"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " -":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 -#: ../../source/tutorial-quickstart-jax.rst:161 -msgid "We can already start the *server*:" -msgstr "我们已经可以启动*服务器*了:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#, fuzzy -msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined centralized training in ``cifar.py``. Our " -"*client* needs to import ``flwr``, but also ``torch`` to update the " -"parameters on our PyTorch model:" -msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " -"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " -"PyTorch 模型的参数:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 -#, fuzzy -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``CifarClient``. 
``NumPyClient`` is slightly easier to implement " -"than ``Client`` if you use a framework with good NumPy interoperability " -"(like PyTorch or TensorFlow/Keras) because it avoids some of the " -"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " -"implement four methods, two methods for getting/setting model parameters," -" one method for training the model, and one method for testing the model:" -msgstr "" -"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" -" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " -":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#, fuzzy -msgid "``set_parameters``" -msgstr ":code:`set_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 -#: ../../source/tutorial-quickstart-jax.rst:192 -msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "在本地模型上设置从服务器接收的模型参数" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 -#: ../../source/tutorial-quickstart-jax.rst:194 -#, fuzzy -msgid "" -"loop over the list of model parameters received as NumPy ``ndarray``'s " -"(think list of neural network layers)" -msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#: ../../source/tutorial-quickstart-scikitlearn.rst:129 -#, fuzzy -msgid "``get_parameters``" -msgstr ":code:`get_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 -#: ../../source/tutorial-quickstart-jax.rst:197 -#, fuzzy -msgid "" -"get the model parameters and return them as a list of NumPy ``ndarray``'s" -" (which is what 
``flwr.client.NumPyClient`` expects)" -msgstr "" -"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " -":code:`flwr.client.NumPyClient`所匹配的格式)" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "``fit``" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 -#: ../../source/tutorial-quickstart-jax.rst:200 -#: ../../source/tutorial-quickstart-jax.rst:205 -msgid "" -"update the parameters of the local model with the parameters received " -"from the server" -msgstr "用从服务器接收到的参数更新本地模型的参数" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 -#: ../../source/tutorial-quickstart-jax.rst:202 -msgid "train the model on the local training set" -msgstr "在本地训练集上训练模型" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 -msgid "get the updated local model weights and return them to the server" -msgstr "获取更新后的本地模型参数并发送回服务器" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -#: ../../source/tutorial-quickstart-jax.rst:208 -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -#, fuzzy -msgid "``evaluate``" -msgstr ":code:`evaluate`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 -#: ../../source/tutorial-quickstart-jax.rst:207 -msgid "evaluate the updated model on the local test set" -msgstr "在本地测试集上评估更新后的模型" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 -msgid "return the local loss and accuracy to the server" -msgstr "向服务器返回本地损失值和精确度" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 -#, fuzzy -msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. 
" -"So what we really do here is we tell Flower through our ``NumPyClient`` " -"subclass which of our already defined functions to call for training and " -"evaluation. We included type annotations to give you a better " -"understanding of the data types that get passed around." -msgstr "" -"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " -":code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 -#, fuzzy -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a ``CifarClient``, and starts this client. You load your " -"data and model by using ``cifar.py``. Start ``CifarClient`` with the " -"function ``fl.client.start_client()`` by pointing it at the same IP " -"address we used in ``server.py``:" -msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 -#: ../../source/tutorial-quickstart-jax.rst:309 -msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 -msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. 
Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" -msgstr "" -"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " -"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" -" CIFAR-10 子集会如何?增加更多客户端会如何?" - #: ../../source/explanation-differential-privacy.rst:2 #: ../../source/explanation-differential-privacy.rst:14 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 @@ -5017,7 +4793,7 @@ msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素 #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:141 -#: ../../source/how-to-use-differential-privacy.rst:113 +#: ../../source/how-to-use-differential-privacy.rst:114 #, fuzzy msgid "Local Differential Privacy" msgstr "差分隐私" @@ -5111,7 +4887,6 @@ msgstr "" "17455-17466." 
#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated evaluation" msgstr "联邦学习评估" @@ -5138,11 +4913,11 @@ msgid "" "return evaluation results:" msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" -#: ../../source/explanation-federated-evaluation.rst:61 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "Custom Strategies" msgstr "定制策略" -#: ../../source/explanation-federated-evaluation.rst:63 +#: ../../source/explanation-federated-evaluation.rst:72 #, fuzzy msgid "" "The ``Strategy`` abstraction provides a method called ``evaluate`` that " @@ -5153,32 +4928,33 @@ msgstr "" ":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " "的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" -#: ../../source/explanation-federated-evaluation.rst:69 +#: ../../source/explanation-federated-evaluation.rst:78 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated Evaluation" msgstr "联邦评估" -#: ../../source/explanation-federated-evaluation.rst:72 +#: ../../source/explanation-federated-evaluation.rst:81 msgid "Implementing Federated Evaluation" msgstr "实现联邦评估" -#: ../../source/explanation-federated-evaluation.rst:74 +#: ../../source/explanation-federated-evaluation.rst:83 #, fuzzy msgid "" "Client-side evaluation happens in the ``Client.evaluate`` method and can " "be configured from the server side." msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/explanation-federated-evaluation.rst:116 msgid "Configuring Federated Evaluation" msgstr "配置联邦评估" -#: ../../source/explanation-federated-evaluation.rst:110 +#: ../../source/explanation-federated-evaluation.rst:118 msgid "" "Federated evaluation can be configured from the server side. 
Built-in " "strategies support the following arguments:" msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" -#: ../../source/explanation-federated-evaluation.rst:113 +#: ../../source/explanation-federated-evaluation.rst:121 #, fuzzy msgid "" "``fraction_evaluate``: a ``float`` defining the fraction of clients that " @@ -5192,7 +4968,7 @@ msgstr "" ":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " ":code:`0.0`,联邦评估将被禁用。" -#: ../../source/explanation-federated-evaluation.rst:118 +#: ../../source/explanation-federated-evaluation.rst:126 #, fuzzy msgid "" "``min_evaluate_clients``: an ``int``: the minimum number of clients to be" @@ -5204,7 +4980,7 @@ msgstr "" ":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " "设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/explanation-federated-evaluation.rst:122 +#: ../../source/explanation-federated-evaluation.rst:130 #, fuzzy msgid "" "``min_available_clients``: an ``int`` that defines the minimum number of " @@ -5217,7 +4993,7 @@ msgstr "" ":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " ":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/explanation-federated-evaluation.rst:127 +#: ../../source/explanation-federated-evaluation.rst:135 #, fuzzy msgid "" "``on_evaluate_config_fn``: a function that returns a configuration " @@ -5227,27 +5003,29 @@ msgid "" "the number of validation steps performed." msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" -#: ../../source/explanation-federated-evaluation.rst:157 +#: ../../source/explanation-federated-evaluation.rst:177 msgid "Evaluating Local Model Updates During Training" msgstr "评估训练期间的本地模型更新" -#: ../../source/explanation-federated-evaluation.rst:159 +#: ../../source/explanation-federated-evaluation.rst:179 #, fuzzy msgid "" "Model parameters can also be evaluated during training. 
``Client.fit`` " "can return arbitrary evaluation results as a dictionary:" msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" -#: ../../source/explanation-federated-evaluation.rst:201 +#: ../../source/explanation-federated-evaluation.rst:220 msgid "Full Code Example" msgstr "完整代码示例" -#: ../../source/explanation-federated-evaluation.rst:203 +#: ../../source/explanation-federated-evaluation.rst:222 +#, fuzzy msgid "" "For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"evaluation, see the `Advanced TensorFlow Example " +"`_" +" (the same approach can be applied to workloads implemented in any other " +"framework)." msgstr "" "有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " "Example*(同样的方法也可应用于任何其他框架中): " @@ -5458,607 +5236,100 @@ msgid "" "a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/explanation-flower-architecture.rst:121 -msgid "" -"To help you start and manage all of the concurrently executing training " -"runs, Flower offers one additional long-running server-side service " -"called **SuperExec**. When you type ``flwr run`` to start a new training " -"run, the ``flwr`` CLI bundles your local project (mainly your " -"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " -"**SuperExec** will then take care of starting and managing your " -"``ServerApp``, which in turn selects SuperNodes to execute your " -"``ClientApp``." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:128 -msgid "" -"This architecture allows many users to (concurrently) run their projects " -"on the same federation, simply by typing ``flwr run`` on their local " -"developer machine." 
-msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "Flower Deployment Engine with SuperExec" -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:137 -msgid "The SuperExec service for managing concurrent training runs in Flower." -msgstr "" - -#: ../../source/explanation-flower-architecture.rst:141 +#: ../../source/explanation-flower-architecture.rst:123 msgid "" "This explanation covers the Flower Deployment Engine. An explanation " "covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/explanation-flower-architecture.rst:146 +#: ../../source/explanation-flower-architecture.rst:128 #, fuzzy msgid "" "As we continue to enhance Flower at a rapid pace, we'll periodically " "update this explainer document. Feel free to share any feedback with us." msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "FED 模板" - -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "目录" - -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[目录](#table-of-contents)" - -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[总结](#summary)" - -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[动机](#motivation)" - -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[目标](#goals)" - -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid 
"[Non-Goals](#non-goals)" -msgstr "[非目标](#non-goals)" - -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[计划](#proposal)" - -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[缺点](#drawbacks)" - -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[备选方案](#alternatives-considered)" - -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[附录](#appendix)" - -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "总结" - -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 句子 1: 问题概括\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 句子 2: 解决方案概括\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "动机" - -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: 
../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "目标" - -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "非目标" - -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "提案" - -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "缺点" - -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "备选方案" - -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[备选 1\\]" - -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[备选 2\\]" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower 改善文档" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[增强文档模版](#enhancement-doc-template)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[描述数据](#metadata)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[工作流程](#workflow)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub 问题](#github-issues)" - -#: 
../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[谷歌文档](#google-docs)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "为提出更大规模的改动提供一个共同的结构" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "确保改动的动机明确" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "整合评估结果" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "将项目信息保存在版本控制系统中" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "记录面向用户的具有影响力的改动的动机" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "保留 GitHub 问题,用于跟踪进行中的工作" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "因此,\"增强文件\"将以下方面结合起来" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "一个功能和效力跟踪文档" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "一个产品需要文档" +"The Flower 
server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "一个设计文档" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "自定义整合评估结果" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "该文件是与社区合作逐步创建的。" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" +msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/how-to-aggregate-evaluation-results.rst:38 msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." -msgstr "" -"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " -"request)的抽象概念,以了解和沟通项目即将发生的变更。" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" + +#: ../../source/how-to-authenticate-supernodes.rst:2 +#, fuzzy +msgid "Authenticate SuperNodes" +msgstr "验证超级节点" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/how-to-authenticate-supernodes.rst:4 +#, fuzzy msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. 
By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " -"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" +"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " +"GitHub SSH 身份验证的工作方式类似:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" +#: ../../source/how-to-authenticate-supernodes.rst:8 +#, fuzzy +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/how-to-authenticate-supernodes.rst:9 +#, fuzzy msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." 
-msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/how-to-authenticate-supernodes.rst:10 +#, fuzzy msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." -msgstr "" -"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " -"\"的工作或使用方式。" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 -msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." -msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" +#: ../../source/how-to-authenticate-supernodes.rst:12 +#, fuzzy +msgid "SuperLink verifies the token" +msgstr "超级链接验证令牌" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/how-to-authenticate-supernodes.rst:14 +#, fuzzy msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." -msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "增强文档模板" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." 
+msgstr "" +"请参阅`完整代码示例 " +"`_了解更多信息。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 -msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "标题(与描述数据中的标题相同)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "目录(如有需要)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "注意事项/限制/警告(可选)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "设计细节(可选)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "毕业标准" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "升级/降级策略(如适用)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "作为参考,本文件采用上述结构。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "描述数据" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." 
-msgstr "**标题** (必填)用简明语言写出提案的标题。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 -msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 -msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." -msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." -msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." -msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." -msgstr "**另见** (可选)与本提案相关的其他提案清单。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**取代**(可选) 这份提案所取代的提案列表。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "**被取代者** (可选) 此提案取代的提案列表。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "工作流程" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." 
-msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." -msgstr "" -"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " -"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " -"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." -msgstr "" -"一旦增强功能通过审核和批准,其状态就会变为 " -"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" -msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 -msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." -msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." 
-msgstr "`可实施`: 增强功能已审核通过。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`已实施`: 增强功能已实施,不再主动更改。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." -msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`撤回`: 作者已撤回增强功能。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`已替换`: 增强功能已被新的增强功能取代。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." -msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 问题" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. 
The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." -msgstr "" -"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " -"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " -"问题时,管理这些多重讨论会很混乱。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "谷歌文档" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." -msgstr "" -"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" -" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" - -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 增强文件" - -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "整合评估结果" - -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." 
-msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" - -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "自定义整合评估结果" - -#: ../../source/how-to-aggregate-evaluation-results.rst:10 -#, fuzzy -msgid "" -"The same ``Strategy``-customization approach can be used to aggregate " -"custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" -msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" - -#: ../../source/how-to-aggregate-evaluation-results.rst:39 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" -msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" - -#: ../../source/how-to-authenticate-supernodes.rst:2 -#, fuzzy -msgid "Authenticate SuperNodes" -msgstr "验证超级节点" - -#: ../../source/how-to-authenticate-supernodes.rst:4 -#, fuzzy -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. 
" -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" -msgstr "" -"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " -"GitHub SSH 身份验证的工作方式类似:" - -#: ../../source/how-to-authenticate-supernodes.rst:8 -#, fuzzy -msgid "SuperLink (server) stores a list of known (client) node public keys" -msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" - -#: ../../source/how-to-authenticate-supernodes.rst:9 -#, fuzzy -msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" -msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" - -#: ../../source/how-to-authenticate-supernodes.rst:10 -#, fuzzy -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" -msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" - -#: ../../source/how-to-authenticate-supernodes.rst:12 -#, fuzzy -msgid "SuperLink verifies the token" -msgstr "超级链接验证令牌" - -#: ../../source/how-to-authenticate-supernodes.rst:14 -#, fuzzy -msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." -msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" - -#: ../../source/how-to-authenticate-supernodes.rst:20 -#, fuzzy +#: ../../source/how-to-authenticate-supernodes.rst:20 +#, fuzzy msgid "" "This guide covers a preview feature that might change in future versions " "of Flower." 
@@ -6193,7 +5464,7 @@ msgstr "" "假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" #: ../../source/how-to-authenticate-supernodes.rst:100 -#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-enable-tls-connections.rst:108 #: ../../source/how-to-use-built-in-mods.rst:95 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" @@ -6211,16 +5482,17 @@ msgstr "" "服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" #: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" +#, fuzzy +msgid "Configure Clients" msgstr "配置客户端" #: ../../source/how-to-configure-clients.rst:4 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." -msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" +"Flower provides the ability to send configuration values to clients, " +"allowing server-side control over client behavior. This feature enables " +"flexible and dynamic adjustment of client-side hyperparameters, improving" +" collaboration and experimentation." +msgstr "" #: ../../source/how-to-configure-clients.rst:9 msgid "Configuration values" @@ -6228,363 +5500,674 @@ msgstr "配置值" #: ../../source/how-to-configure-clients.rst:11 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. 
These values must be of type ``Scalar``, which includes " +"``bool``, ``bytes``, ``float``, ``int``, or ``str`` (or equivalent types " +"in different languages). Scalar is the value type directly supported by " +"Flower for these configurations." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:17 +msgid "For example, a ``FitConfig`` dictionary might look like this:" msgstr "" -"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " -"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-configure-clients.rst:25 +#: ../../source/how-to-configure-clients.rst:28 +#, fuzzy msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " +"Flower serializes these configuration dictionaries (or *config dicts* for" +" short) to their ProtoBuf representation, transports them to the client " "using gRPC, and then deserializes them back to Python dictionaries." msgstr "" "Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " "将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-configure-clients.rst:31 +#: ../../source/how-to-configure-clients.rst:34 +#, fuzzy msgid "" "Currently, there is no support for directly sending collection types " "(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"dictionaries. To send collections, convert them to a supported type " +"(e.g., JSON string) and decode on the client side." 
msgstr "" "目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " "`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" -#: ../../source/how-to-configure-clients.rst:36 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" +#: ../../source/how-to-configure-clients.rst:38 +#, fuzzy +msgid "Example:" +msgstr "实例" -#: ../../source/how-to-configure-clients.rst:41 -msgid "Configuration through built-in strategies" +#: ../../source/how-to-configure-clients.rst:51 +#, fuzzy +msgid "Configuration through Built-in Strategies" msgstr "通过内置策略进行配置" -#: ../../source/how-to-configure-clients.rst:43 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:53 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like ``FedAvg``. Built-in strategies support so-called " -"configuration functions. A configuration function is a function that the " -"built-in strategy calls to get the configuration dictionary for the " -"current round. It then forwards the configuration dictionary to all the " -"clients selected during that round." +"Flower provides configuration options to control client behavior " +"dynamically through ``FitConfig`` and ``EvaluateConfig``. These " +"configurations allow server-side control over client-side parameters such" +" as batch size, number of local epochs, learning rate, and evaluation " +"settings, improving collaboration and experimentation." msgstr "" -"向客户端发送配置值的最简单方法是使用内置策略,如 " -":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" - -#: ../../source/how-to-configure-clients.rst:49 -msgid "" -"Let's start with a simple example. 
Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" -msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" -#: ../../source/how-to-configure-clients.rst:65 +#: ../../source/how-to-configure-clients.rst:59 #, fuzzy +msgid "``FitConfig`` and ``EvaluateConfig``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-configure-clients.rst:61 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -"``on_fit_config_fn``:" -msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" +"``FitConfig`` and ``EvaluateConfig`` are dictionaries containing " +"configuration values that the server sends to clients during federated " +"learning rounds. These dictionaries enable the server to adjust client-" +"side hyperparameters and monitor progress effectively." +msgstr "" -#: ../../source/how-to-configure-clients.rst:75 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" +#: ../../source/how-to-configure-clients.rst:67 +#, fuzzy +msgid "``FitConfig``" +msgstr "配置日志记录" -#: ../../source/how-to-configure-clients.rst:86 +#: ../../source/how-to-configure-clients.rst:69 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"``FitConfig`` specifies the hyperparameters for training rounds, such as " +"the batch size, number of local epochs, and other parameters that " +"influence training." 
+msgstr "" + +#: ../../source/how-to-configure-clients.rst:72 +msgid "For example, a ``fit_config`` callback might look like this:" msgstr "" -"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " -"发送不同的配置值(例如,使用不同的批量大小)。" #: ../../source/how-to-configure-clients.rst:90 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"You can then pass this ``fit_config`` callback to a built-in strategy " +"such as ``FedAvg``:" msgstr "" -"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " -"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " -"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-configure-clients.rst:107 -#, fuzzy -msgid "The ``FedAvg`` strategy will call this function *every round*." -msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" - -#: ../../source/how-to-configure-clients.rst:110 -msgid "Configuring individual clients" -msgstr "配置个别客户端" - -#: ../../source/how-to-configure-clients.rst:112 +#: ../../source/how-to-configure-clients.rst:101 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." 
-msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" +"On the client side, the configuration is received in the ``fit`` method, " +"where it can be read and used:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:115 +#: ../../source/how-to-configure-clients.rst:124 #, fuzzy +msgid "``EvaluateConfig``" +msgstr ":code:`evaluate`" + +#: ../../source/how-to-configure-clients.rst:126 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " -"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " -"the config dict of a *single client* (only the first client in the list, " -"the other clients in this round to not receive this \"special\" config " -"value):" +"``EvaluateConfig`` specifies hyperparameters for the evaluation process, " +"such as the batch size, evaluation frequency, or metrics to compute " +"during evaluation." msgstr "" -"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " -"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " -"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "配置日志记录" +#: ../../source/how-to-configure-clients.rst:129 +msgid "For example, an ``evaluate_config`` callback might look like this:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/how-to-configure-clients.rst:143 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. 
It presents information by default " -"following a standard message format:" -msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" +"You can pass this ``evaluate_config`` callback to a built-in strategy " +"like ``FedAvg``:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:13 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:151 msgid "" -"containing relevant information including: log message level (e.g. " -"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " -"from, as well as the log message itself. In this way, the logger would " -"typically display information on your terminal as follows:" +"On the client side, the configuration is received in the ``evaluate`` " +"method, where it can be used during the evaluation process:" msgstr "" -"相关信息包括:日志信息级别(例如 " -":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" -#: ../../source/how-to-configure-logging.rst:35 -msgid "Saving log to file" -msgstr "将日志保存到文件" +#: ../../source/how-to-configure-clients.rst:175 +msgid "Example: Sending Training Configurations" +msgstr "" -#: ../../source/how-to-configure-logging.rst:37 +#: ../../source/how-to-configure-clients.rst:177 #, fuzzy msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do ``fl.server.start_server``) and when using " -"the ``VirtualClientEngine`` (i.e. when you do " -"``fl.simulation.start_simulation``). In some situations you might want to" -" save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. 
For example:" -msgstr "" -"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " -":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " -"时(即执行 :code:`fl.simulation.start_simulation` " -"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " -"`_" -" 函数。例如:" +"Imagine we want to send (a) the batch size, (b) the current global round," +" and (c) the number of local epochs. Our configuration function could " +"look like this:" +msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" -#: ../../source/how-to-configure-logging.rst:59 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:190 msgid "" -"With the above, Flower will record the log you see on your terminal to " -"``log.txt``. This file will be created in the same directory as were you " -"are running the code from. If we inspect we see the log above is also " -"recorded but prefixing with ``identifier`` each line:" +"To use this function with a built-in strategy like ``FedAvg``, pass it to" +" the ``FedAvg`` constructor (typically in your ``server_fn``):" msgstr "" -"通过上述操作,Flower 会将您在终端上看到的日志记录到 " -":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " -":code:`identifier` 作为前缀:" -#: ../../source/how-to-configure-logging.rst:81 -msgid "Log your own messages" -msgstr "记录自己的信息" +#: ../../source/how-to-configure-clients.rst:211 +#, fuzzy +msgid "Client-Side Configuration" +msgstr "客户端逻辑" -#: ../../source/how-to-configure-logging.rst:83 +#: ../../source/how-to-configure-clients.rst:213 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" +"On the client side, configurations are received as input to the ``fit`` " +"and ``evaluate`` methods. 
For example:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:230 +msgid "Dynamic Configurations per Round" +msgstr "" -#: ../../source/how-to-configure-logging.rst:114 +#: ../../source/how-to-configure-clients.rst:232 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" +"Configuration functions are called at the beginning of every round. This " +"allows for dynamic adjustments based on progress. For example, you can " +"increase the number of local epochs in later rounds:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:140 -msgid "Log to a remote service" -msgstr "登录远程服务" +#: ../../source/how-to-configure-clients.rst:247 +msgid "Customizing Client Configurations" +msgstr "" -#: ../../source/how-to-configure-logging.rst:142 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:249 msgid "" -"The ``fl.common.logger.configure`` function, also allows specifying a " -"host to which logs can be pushed (via ``POST``) through a native Python " -"``logging.handler.HTTPHandler``. This is a particularly useful feature in" -" ``gRPC``-based Federated Learning workloads where otherwise gathering " -"logs from all entities (i.e. the server and the clients) might be " -"cumbersome. Note that in Flower simulation, the server automatically " -"displays all logs. You can still specify a ``HTTPHandler`` should you " -"wish to backup or analyze the logs somewhere else." +"In some cases, it may be necessary to send different configurations to " +"individual clients. 
To achieve this, you can create a custom strategy by " +"extending a built-in one, such as ``FedAvg``:" msgstr "" -"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " -":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " -":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " -"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "启用 SSL 连接" +#: ../../source/how-to-configure-clients.rst:254 +msgid "Example: Client-Specific Configuration" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " -"can establish a secure connections to it." -msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" +#: ../../source/how-to-configure-clients.rst:273 +msgid "Next, use this custom strategy as usual:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:8 -msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +#: ../../source/how-to-configure-clients.rst:287 +msgid "Summary of Enhancements" msgstr "" -"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-enable-ssl-connections.rst:11 -#, fuzzy -msgid "" -"The code example comes with a ``README.md`` file which explains how to " -"start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." -msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" +#: ../../source/how-to-configure-clients.rst:289 +msgid "**Dynamic Configurations**: Enables per-round adjustments via functions." 
+msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "证书" +#: ../../source/how-to-configure-clients.rst:290 +msgid "**Advanced Customization**: Supports client-specific strategies." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:291 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in ``examples/advanced-" -"tensorflow/certificates/generate.sh`` with the following command " -"sequence:" +"**Client-Side Integration**: Configurations accessible in ``fit`` and " +"``evaluate``." msgstr "" -"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " -":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/how-to-design-stateful-clients.rst:2 #, fuzzy +msgid "Design stateful ClientApps" +msgstr "客户端" + +#: ../../source/how-to-design-stateful-clients.rst:20 msgid "" -"This will generate the certificates in ``examples/advanced-" -"tensorflow/.cache/certificates``." -msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" +"By design, ClientApp_ objects are stateless. This means that the " +"``ClientApp`` object is recreated each time a new ``Message`` is to be " +"processed. This behaviour is identical with Flower's Simulation Engine " +"and Deployment Engine. For the former, it allows us to simulate the " +"running of a large number of nodes on a single machine or across multiple" +" machines. For the latter, it enables each ``SuperNode`` to be part of " +"multiple runs, each running a different ``ClientApp``." 
+msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:32 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:27 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." -msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" +"When a ``ClientApp`` is executed it receives a Context_. This context is " +"unique for each ``ClientApp``, meaning that subsequent executions of the " +"same ``ClientApp`` from the same node will receive the same ``Context`` " +"object. In the ``Context``, the ``.state`` attribute can be used to store" +" information that you would like the ``ClientApp`` to have access to for " +"the duration of the run. This could be anything from intermediate results" +" such as the history of training losses (e.g. as a list of `float` values" +" with a new entry appended each time the ``ClientApp`` is executed), " +"certain parts of the model that should persist at the client side, or " +"some other arbitrary Python objects. These items would need to be " +"serialized before saving them into the context." 
+msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:40 -#, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +#: ../../source/how-to-design-stateful-clients.rst:38 +msgid "Saving metrics to the context" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:42 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:40 msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +"This section will demonstrate how to save metrics such as accuracy/loss " +"values to the Context_ so they can be used in subsequent executions of " +"the ``ClientApp``. If your ``ClientApp`` makes use of NumPyClient_ then " +"entire object is also re-created for each call to methods like ``fit()`` " +"or ``evaluate()``." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:52 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:45 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" - -#: ../../source/how-to-enable-ssl-connections.rst:56 -#, fuzzy -msgid "Client (SuperNode)" -msgstr "客户端状态代码。" +"Let's begin with a simple setting in which ``ClientApp`` is defined as " +"follows. The ``evaluate()`` method only generates a random number and " +"prints it." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:58 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:50 msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +"You can create a PyTorch project with ready-to-use ``ClientApp`` and " +"other components by running ``flwr new``." 
+msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:67 -#, fuzzy +#: ../../source/how-to-design-stateful-clients.rst:81 msgid "" -"When setting ``root_certificates``, the client expects a file path to " +"Let's say we want to save that randomly generated integer and append it " +"to a list that persists in the context. To do that, you'll need to do two" +" key things:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:84 +msgid "Make the ``context.state`` reachable within your client class" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:85 +msgid "" +"Initialise the appropriate record type (in this example we use " +"ConfigsRecord_) and save/read your entry when required." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:123 +msgid "" +"If you run the app, you'll see an output similar to the one below. See " +"how after each round the `n_val` entry in the context gets one additional" +" integer? Note that the order in which the `ClientApp` logs these " +"messages might differ slightly between rounds." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:146 +msgid "Saving model parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:148 +msgid "" +"Using ConfigsRecord_ or MetricsRecord_ to save \"simple\" components is " +"fine (e.g., float, integer, boolean, string, bytes, and lists of these " +"types. Note that MetricsRecord_ only supports float, integer, and lists " +"of these types). Flower has a specific type of record, a " +"ParametersRecord_, for storing model parameters or more generally data " +"arrays." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:153 +msgid "" +"Let's see a couple of examples of how to save NumPy arrays first and then" +" how to save parameters of PyTorch and TensorFlow models." 
+msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:158 +msgid "" +"The examples below omit the definition of a ``ClientApp`` to keep the " +"code blocks concise. To make use of ``ParametersRecord`` objects in your " +"``ClientApp`` you can follow the same principles as outlined earlier." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:163 +#, fuzzy +msgid "Saving NumPy arrays to the context" +msgstr "将 NumPy ndarray 序列化为字节。" + +#: ../../source/how-to-design-stateful-clients.rst:165 +msgid "" +"Elements stored in a `ParametersRecord` are of type Array_, which is a " +"data structure that holds ``bytes`` and metadata that can be used for " +"deserialization. Let's see how to create an ``Array`` from a NumPy array " +"and insert it into a ``ParametersRecord``. Here we will make use of the " +"built-in serialization and deserialization mechanisms in Flower, namely " +"the ``flwr.common.array_from_numpy`` function and the `numpy()` method of" +" an Array_ object." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:174 +msgid "" +"Array_ objects carry bytes as their main payload and additional metadata " +"to use for deserialization. You can implement your own " +"serialization/deserialization if the provided ``array_from_numpy`` " +"doesn't fit your usecase." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:178 +msgid "" +"Let's see how to use those functions to store a NumPy array into the " +"context." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:206 +msgid "" +"To extract the data in a ``ParametersRecord``, you just need to " +"deserialize the array if interest. 
For example, following the example " +"above:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:223 +msgid "Saving PyTorch parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:225 +msgid "" +"Following the NumPy example above, to save parameters of a PyTorch model " +"a straightforward way of doing so is to transform the parameters into " +"their NumPy representation and then proceed as shown earlier. Below is a " +"simple self-contained example for how to do this." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:263 +msgid "" +"Let's say now you want to apply the parameters stored in your context to a " +"new instance of the model (as it happens each time a ``ClientApp`` is " +"executed). You will need to:" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:266 +msgid "Deserialize each element in your specific ``ParametersRecord``" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:267 +msgid "Construct a ``state_dict`` and load it" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:287 +msgid "" +"And that's it! Recall that even though this example shows how to store " +"the entire ``state_dict`` in a ``ParametersRecord``, you can just save " +"part of it. The process would be identical, but you might need to adjust " +"how it is loaded into an existing model using PyTorch APIs." +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:293 +msgid "Saving Tensorflow/Keras parameters to the context" +msgstr "" + +#: ../../source/how-to-design-stateful-clients.rst:295 +msgid "" +"Follow the same steps as done above but replace the ``state_dict`` logic " +"with simply `get_weights() " +"`_" +" to convert the model parameters to a list of NumPy arrays that can then " +"be serialized into an ``Array``. Then, after deserialization, use " +"`set_weights() " +"`_" +" to apply the new parameters to a model." 
+msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:2 +#, fuzzy +msgid "Enable TLS connections" +msgstr "启用 SSL 连接" + +#: ../../source/how-to-enable-tls-connections.rst:4 +#, fuzzy +msgid "" +"This guide describes how to a TLS-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." +msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" + +#: ../../source/how-to-enable-tls-connections.rst:8 +msgid "" +"A complete code example demonstrating a secure connection can be found " +"`here `_." +msgstr "" +"有关安全连接的完整代码示例,请参见 `_ 。" + +#: ../../source/how-to-enable-tls-connections.rst:11 +#, fuzzy +msgid "" +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already TLS-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." +msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" + +#: ../../source/how-to-enable-tls-connections.rst:16 +msgid "Certificates" +msgstr "证书" + +#: ../../source/how-to-enable-tls-connections.rst:18 +#, fuzzy +msgid "" +"Using TLS-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" +msgstr "" +"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " +":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" + +#: ../../source/how-to-enable-tls-connections.rst:29 +#, fuzzy +msgid "" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." 
+msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" + +#: ../../source/how-to-enable-tls-connections.rst:32 +#, fuzzy +msgid "" +"The approach for generating TLS certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." +msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" + +#: ../../source/how-to-enable-tls-connections.rst:40 +#, fuzzy +msgid "Server (SuperLink)" +msgstr "flower-superlink" + +#: ../../source/how-to-enable-tls-connections.rst:42 +#, fuzzy +msgid "" +"Navigate to the ``examples/advanced-tensorflow`` folder (`here " +"`_) and use the following terminal command to start a server " +"(SuperLink) that uses the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" + +#: ../../source/how-to-enable-tls-connections.rst:54 +#, fuzzy +msgid "" +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" + +#: ../../source/how-to-enable-tls-connections.rst:58 +#, fuzzy +msgid "Clients (SuperNode)" +msgstr "客户端状态代码。" + +#: ../../source/how-to-enable-tls-connections.rst:60 +#, fuzzy +msgid "" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" + +#: ../../source/how-to-enable-tls-connections.rst:71 +#, fuzzy +msgid "" +"When setting ``root_certificates``, the client expects a file path to " "PEM-encoded root certificates." 
msgstr "" "当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " ":code:`Path` 来简化以字节字符串形式读取证书的过程。" -#: ../../source/how-to-enable-ssl-connections.rst:73 +#: ../../source/how-to-enable-tls-connections.rst:74 +#, fuzzy +msgid "" +"In another terminal, start a second SuperNode that uses the same " +"certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" + +#: ../../source/how-to-enable-tls-connections.rst:84 +msgid "" +"Note that in the second SuperNode, if you run both on the same machine, " +"you must specify a different port for the ``ClientAppIO`` API address to " +"avoid clashing with the first SuperNode." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:89 +msgid "Executing ``flwr run`` with TLS" +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:91 +msgid "" +"The root certificates used for executing ``flwr run`` is specified in the" +" ``pyproject.toml`` of your app." +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:100 +msgid "" +"Note that the path to the ``root-certificates`` is relative to the root " +"of the project. Now, you can run the example by executing the following:" +msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:110 #, fuzzy msgid "" "You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"using the given script, start an TLS-enabled server and have two clients " +"establish secure connections to it. You should also have learned how to " +"run your Flower project using ``flwr run`` with TLS enabled." msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" -#: ../../source/how-to-enable-ssl-connections.rst:78 +#: ../../source/how-to-enable-tls-connections.rst:117 +msgid "" +"For running a Docker setup with TLS enabled, please refer to :doc:`docker" +"/enable-tls`." 
+msgstr "" + +#: ../../source/how-to-enable-tls-connections.rst:121 msgid "Additional resources" msgstr "补充资源" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-tls-connections.rst:123 msgid "" "These additional sources might be relevant if you would like to dive " "deeper into the topic of certificates:" msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" -#: ../../source/how-to-enable-ssl-connections.rst:83 +#: ../../source/how-to-enable-tls-connections.rst:126 msgid "`Let's Encrypt `_" msgstr "`让我们加密 `_" -#: ../../source/how-to-enable-ssl-connections.rst:84 +#: ../../source/how-to-enable-tls-connections.rst:127 msgid "`certbot `_" msgstr "`certbot `_" +#: ../../source/how-to-implement-fedbn.rst:2 +#, fuzzy +msgid "Implement FedBN" +msgstr "实施策略" + +#: ../../source/how-to-implement-fedbn.rst:4 +#, fuzzy +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training method " +"designed for non-IID data. We are using PyTorch to train a Convolutional " +"Neural Network (with Batch Normalization layers) on the CIFAR-10 dataset." +" When applying FedBN, only minor changes are needed compared to " +":doc:`Quickstart PyTorch `." +msgstr "" +"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " +"从集中式到联邦式 `_ 做少量改动。" + +#: ../../source/how-to-implement-fedbn.rst:12 +#, fuzzy +msgid "Model" +msgstr "模块" + +#: ../../source/how-to-implement-fedbn.rst:14 +msgid "" +"A full introduction to federated learning with PyTorch and Flower can be " +"found in :doc:`Quickstart PyTorch `. This " +"how-to guide varies only a few details in ``task.py``. 
FedBN requires a " +"model architecture (defined in class ``Net()``) that uses Batch " +"Normalization layers:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:45 +msgid "" +"Try editing the model architecture, then run the project to ensure " +"everything still works:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:52 +msgid "" +"So far this should all look fairly familiar if you've used Flower with " +"PyTorch before." +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:55 +#, fuzzy +msgid "FedBN" +msgstr "DP-FedAvg" + +#: ../../source/how-to-implement-fedbn.rst:57 +msgid "" +"To adopt FedBN, only the ``get_parameters`` and ``set_parameters`` " +"functions in ``task.py`` need to be revised. FedBN only changes the " +"client-side by excluding batch normalization parameters from being " +"exchanged with the server." +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:61 +#, fuzzy +msgid "" +"We revise the *client* logic by changing ``get_parameters`` and " +"``set_parameters`` in ``task.py``. The batch normalization parameters are" +" excluded from model parameter list when sending to or receiving from the" +" server:" +msgstr "" +"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " +":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " +"normalization层的参数。" + +#: ../../source/how-to-implement-fedbn.rst:90 +msgid "To test the new appraoch, run the project again:" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:96 +msgid "" +"Your PyTorch project now runs federated learning with FedBN. " +"Congratulations!" +msgstr "" + +#: ../../source/how-to-implement-fedbn.rst:99 +msgid "Next Steps" +msgstr "下一步工作" + +#: ../../source/how-to-implement-fedbn.rst:101 +#, fuzzy +msgid "" +"The example is of course over-simplified since all clients load the exact" +" same dataset. This isn't realistic. You now have the tools to explore " +"this topic further. 
How about using different subsets of CIFAR-10 on each" +" client? How about adding more clients?" +msgstr "" +"本示例的完整源代码可在 `_ " +"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " +"CIFAR-10 子集,或者增加客户端的数量。" + #: ../../source/how-to-implement-strategies.rst:2 msgid "Implement strategies" msgstr "实施策略" @@ -6928,7 +6511,6 @@ msgid "Install stable release" msgstr "安装稳定版" #: ../../source/how-to-install-flower.rst:14 -#: ../../source/how-to-upgrade-to-flower-next.rst:66 #, fuzzy msgid "Using pip" msgstr "使用 pip" @@ -7039,444 +6621,344 @@ msgid "" "should be installed with the ``simulation`` extra:" msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "监控模拟" +#: ../../source/how-to-run-simulations.rst:22 +msgid "Run simulations" +msgstr "运行模拟" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/how-to-run-simulations.rst:24 +#, fuzzy msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"Simulating Federated Learning workloads is useful for a multitude of use " +"cases: you might want to run your workload on a large cohort of clients " +"without having to source, configure, and manage a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without going through " +"a complex setup process; you might want to validate your algorithm in " +"different scenarios at varying levels of data and system heterogeneity, " +"client availability, privacy budgets, etc. These are among some of the " +"use cases where simulating FL workloads makes sense." 
msgstr "" -"Flower 允许您在运行模拟时监控系统资源。此外,Flower " -"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" +"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" +" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " +"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" -#: ../../source/how-to-monitor-simulation.rst:9 +#: ../../source/how-to-run-simulations.rst:33 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." -msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" - -#: ../../source/how-to-monitor-simulation.rst:13 -msgid "Downloads" -msgstr "下载" +"Flower's ``Simulation Engine`` schedules, launches, and manages " +"|clientapp_link|_ instances. It does so through a ``Backend``, which " +"contains several workers (i.e., Python processes) that can execute a " +"``ClientApp`` by passing it a |context_link|_ and a |message_link|_. " +"These ``ClientApp`` objects are identical to those used by Flower's " +"`Deployment Engine `_, making " +"alternating between *simulation* and *deployment* an effortless process. " +"The execution of ``ClientApp`` objects through Flower's ``Simulation " +"Engine`` is:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:19 +#: ../../source/how-to-run-simulations.rst:41 +#, fuzzy msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"**Resource-aware**: Each backend worker executing ``ClientApp``\\s gets " +"assigned a portion of the compute and memory on your system. You can " +"define these at the beginning of the simulation, allowing you to control " +"the degree of parallelism of your simulation. 
For a fixed total pool of " +"resources, the fewer the resources per backend worker, the more " +"``ClientApps`` can run concurrently on the same hardware." msgstr "" -"`Prometheus `_ 用于收集数据,而 `Grafana " -"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " -"`_ 紧密集成。" +"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " +"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" -#: ../../source/how-to-monitor-simulation.rst:23 +#: ../../source/how-to-run-simulations.rst:46 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" - -#: ../../source/how-to-monitor-simulation.rst:26 -msgid "If you are on an M1 Mac, it should be:" -msgstr "如果你使用的是 M1 Mac,应该是这样:" - -#: ../../source/how-to-monitor-simulation.rst:33 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "在上一代英特尔 Mac 设备上,应该是这样:" +"**Batchable**: When there are more ``ClientApps`` to execute than backend" +" workers, ``ClientApps`` are queued and executed as soon as resources are" +" freed. This means that ``ClientApps`` are typically executed in batches " +"of N, where N is the number of backend workers." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:40 +#: ../../source/how-to-run-simulations.rst:50 +#, fuzzy msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" -msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" +"**Self-managed**: This means that you, as a user, do not need to launch " +"``ClientApps`` manually; instead, the ``Simulation Engine``'s internals " +"orchestrates the execution of all ``ClientApp``\\s." +msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" -#: ../../source/how-to-monitor-simulation.rst:51 +#: ../../source/how-to-run-simulations.rst:53 +#, fuzzy msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. 
You may adjust the time intervals to your " -"requirements:" -msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" +"**Ephemeral**: This means that a ``ClientApp`` is only materialized when " +"it is required by the application (e.g., to do `fit() `_). The object is destroyed afterward, " +"releasing the resources it was assigned and allowing other clients to " +"participate." +msgstr "" +"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " +")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" -#: ../../source/how-to-monitor-simulation.rst:67 +#: ../../source/how-to-run-simulations.rst:60 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" -msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" +"You can preserve the state (e.g., internal variables, parts of an ML " +"model, intermediate results) of a ``ClientApp`` by saving it to its " +"``Context``. Check the `Designing Stateful Clients `_ guide for a complete walkthrough." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:78 +#: ../../source/how-to-run-simulations.rst:65 +#, fuzzy msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." -msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" +"The ``Simulation Engine`` delegates to a ``Backend`` the role of spawning" +" and managing ``ClientApps``. The default backend is the ``RayBackend``, " +"which uses `Ray `_, an open-source framework for " +"scalable Python workloads. In particular, each worker is an `Actor " +"`_ capable of " +"spawning a ``ClientApp`` given its ``Context`` and a ``Message`` to " +"process." 
+msgstr "" +":code:`VirtualClientEngine`使用`Ray " +"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" +" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" -#: ../../source/how-to-monitor-simulation.rst:94 -msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" +#: ../../source/how-to-run-simulations.rst:73 +msgid "Launch your Flower simulation" +msgstr "启动 Flower 模拟" -#: ../../source/how-to-monitor-simulation.rst:98 -msgid "Tracking metrics" -msgstr "跟踪指标" +#: ../../source/how-to-run-simulations.rst:75 +msgid "" +"Running a simulation is straightforward; in fact, it is the default mode " +"of operation for |flwr_run_link|_. Therefore, running Flower simulations " +"primarily requires you to first define a ``ClientApp`` and a " +"``ServerApp``. A convenient way to generate a minimal but fully " +"functional Flower app is by means of the |flwr_new_link|_ command. There " +"are multiple templates to choose from. The example below uses the " +"``PyTorch`` template." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:100 +#: ../../source/how-to-run-simulations.rst:83 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." -msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" +"If you haven't already, install Flower via ``pip install -U flwr`` in a " +"Python environment." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-run-simulations.rst:91 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "开始模拟时,请在 Python 代码中加入以下参数。" +"Then, follow the instructions shown after completing the |flwr_new_link|_" +" command. When you execute |flwr_run_link|_, you'll be using the " +"``Simulation Engine``." 
+msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "Now, you are ready to start your workload." -msgstr "现在,您可以开始工作了。" +#: ../../source/how-to-run-simulations.rst:94 +msgid "" +"If we take a look at the ``pyproject.toml`` that was generated from the " +"|flwr_new_link|_ command (and loaded upon |flwr_run_link|_ execution), we" +" see that a *default* federation is defined. It sets the number of " +"supernodes to 10." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-run-simulations.rst:106 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" -msgstr "模拟启动后不久,您就会在终端中看到以下日志:" +"You can modify the size of your simulations by adjusting ``options.num-" +"supernodes``." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:127 -#, fuzzy -msgid "You can look at everything at http://127.0.0.1:8265 ." -msgstr "您可以在 ``_ 查看所有内容。" - -#: ../../source/how-to-monitor-simulation.rst:129 -msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" +#: ../../source/how-to-run-simulations.rst:109 +msgid "Simulation examples" +msgstr "模拟示例" -#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-run-simulations.rst:111 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"In addition to the quickstart tutorials in the documentation (e.g., " +"`quickstart PyTorch Tutorial `_, " +"`quickstart JAX Tutorial `_), most examples" +" in the Flower repository are simulation-ready." 
msgstr "" -"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " -"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " -"Grafana。" -#: ../../source/how-to-monitor-simulation.rst:137 +#: ../../source/how-to-run-simulations.rst:116 #, fuzzy msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port ``3000`` on your" -" machine as long as they are running." -msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" - -#: ../../source/how-to-monitor-simulation.rst:147 -msgid "Resource allocation" -msgstr "资源分配" - -#: ../../source/how-to-monitor-simulation.rst:149 -msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" +"`Quickstart TensorFlow/Keras " +"`_." +msgstr "" +"`TensorFlow快速入门 (代码) `_" -#: ../../source/how-to-monitor-simulation.rst:152 +#: ../../source/how-to-run-simulations.rst:118 +#, fuzzy msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. 
You can " -"check the system resources by running the following:" +"`Quickstart PyTorch `_" msgstr "" -"最初,模拟(由 Ray " -"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" - -#: ../../source/how-to-monitor-simulation.rst:164 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "在 Google Colab 中,您看到的结果可能与此类似:" +"`PyTorch快速入门 (代码) `_" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-run-simulations.rst:120 +#, fuzzy msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" -msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" - -#: ../../source/how-to-monitor-simulation.rst:195 -msgid "Let’s also specify the resource for a single client." -msgstr "我们还可以为单个客户指定资源。" +"`Advanced PyTorch `_" +msgstr "" +"`PyTorch快速入门 (代码) `_" -#: ../../source/how-to-monitor-simulation.rst:225 +#: ../../source/how-to-run-simulations.rst:122 +#, fuzzy msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." -msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" +"`Quickstart MLX `_" +msgstr "" +"`PyTorch快速入门 (代码) `_" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-run-simulations.rst:123 #, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " -"two clients and therefore enable them to run concurrently. Be careful not" -" to require more resources than available. If you specified " -"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" -" GPUs but decided to set 1 in ``ray_init_args``)." 
+"`ViT fine-tuning `_" msgstr "" -"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " -"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " -"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" - -#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "常见问题" - -#: ../../source/how-to-monitor-simulation.rst:237 -msgid "Q: I don't see any metrics logged." -msgstr "问:我没有看到任何指标记录。" +"`PyTorch快速入门 (代码) `_" -#: ../../source/how-to-monitor-simulation.rst:239 +#: ../../source/how-to-run-simulations.rst:125 +#, fuzzy msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." -msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" +"The complete list of examples can be found in `the Flower GitHub " +"`_." +msgstr "" +"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-monitor-simulation.rst:243 -msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." -msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" +#: ../../source/how-to-run-simulations.rst:131 +#, fuzzy +msgid "Defining ``ClientApp`` resources" +msgstr "分配客户端资源" -#: ../../source/how-to-monitor-simulation.rst:246 +#: ../../source/how-to-run-simulations.rst:133 msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" -msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" +"By default, the ``Simulation Engine`` assigns two CPU cores to each " +"backend worker. This means that if your system has 10 CPU cores, five " +"backend workers can be running in parallel, each executing a different " +"``ClientApp`` instance." 
+msgstr "" -#: ../../source/how-to-monitor-simulation.rst:252 +#: ../../source/how-to-run-simulations.rst:137 #, fuzzy msgid "" -"Q: I see \"This site can't be reached\" when going to " -"http://127.0.0.1:8265." -msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" +"More often than not, you would probably like to adjust the resources your" +" ``ClientApp`` gets assigned based on the complexity (i.e., compute and " +"memory footprint) of your workload. You can do so by adjusting the " +"backend resources for your federation." +msgstr "" +"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " +"`client_resources` 设置为 `start_simulation `_ 。Ray " +"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" -#: ../../source/how-to-monitor-simulation.rst:254 +#: ../../source/how-to-run-simulations.rst:143 +#, python-format msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" - -#: ../../source/how-to-monitor-simulation.rst:257 -msgid "Resources" -msgstr "资源" +"Note that the resources the backend assigns to each worker (and hence to " +"each ``ClientApp`` being executed) are assigned in a *soft* manner. This " +"means that the resources are primarily taken into account in order to " +"control the degree of parallelism at which ``ClientApp`` instances should" +" be executed. Resource assignment is **not strict**, meaning that if you " +"specified your ``ClientApp`` is assumed to make use of 25% of the " +"available VRAM but it ends up using 50%, it might cause other " +"``ClientApp`` instances to crash throwing an out-of-memory (OOM) error." 
+msgstr "" -#: ../../source/how-to-monitor-simulation.rst:259 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:151 msgid "" -"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" -"started.html" -msgstr "Ray 仪表盘: ``_" - -#: ../../source/how-to-monitor-simulation.rst:261 -#, fuzzy -msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +"Customizing resources can be done directly in the ``pyproject.toml`` of " +"your app." msgstr "" -"Ray 指标: ``_" - -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "运行模拟" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/how-to-run-simulations.rst:160 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"With the above backend settings, your simulation will run as many " +"``ClientApps`` in parallel as CPUs you have in your system. GPU resources" +" for your ``ClientApp`` can be assigned by specifying the **ratio** of " +"VRAM each should make use of." 
msgstr "" -"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" -" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " -"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" -#: ../../source/how-to-run-simulations.rst:19 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:173 msgid "" -"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " -"clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -"``VirtualClientEngine`` are:" +"If you are using TensorFlow, you need to `enable memory growth " +"`_ so " +"multiple ``ClientApp`` instances can share a GPU. This needs to be done " +"before launching the simulation. To do so, set the environment variable " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"``." msgstr "" -":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" -" `_启动的客户端),因为它们可以通过创建一个继承自 " -"`flwr.client.NumPyClient `_ " -"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" -#: ../../source/how-to-run-simulations.rst:26 +#: ../../source/how-to-run-simulations.rst:179 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"Let's see how the above configuration results in a different number of " +"``ClientApps`` running in parallel depending on the resources available " +"in your system. 
If your system has:" msgstr "" -"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " -"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" -#: ../../source/how-to-run-simulations.rst:31 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:183 +#, python-format msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to ``VirtualClientEngine``'s " -"internals." -msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" +"10x CPUs and 1x GPU: at most 4 ``ClientApps`` will run in parallel since " +"each requires 25% of the available VRAM." +msgstr "" -#: ../../source/how-to-run-simulations.rst:33 +#: ../../source/how-to-run-simulations.rst:185 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"10x CPUs and 2x GPUs: at most 8 ``ClientApps`` will run in parallel " +"(VRAM-limited)." msgstr "" -"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " -")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" -#: ../../source/how-to-run-simulations.rst:38 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:186 msgid "" -"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " -"`Actors `_ to spawn " -"`virtual` clients and run their workload." +"6x CPUs and 4x GPUs: at most 6 ``ClientApps`` will run in parallel (CPU-" +"limited)." 
msgstr "" -":code:`VirtualClientEngine`使用`Ray " -"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" -" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" - -#: ../../source/how-to-run-simulations.rst:45 -msgid "Launch your Flower simulation" -msgstr "启动 Flower 模拟" -#: ../../source/how-to-run-simulations.rst:47 +#: ../../source/how-to-run-simulations.rst:187 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"10x CPUs but 0x GPUs: you won't be able to run the simulation since not " +"even the resources for a single ``ClientApp`` can be met." msgstr "" -"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " -"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" - -#: ../../source/how-to-run-simulations.rst:73 -msgid "VirtualClientEngine resources" -msgstr "虚拟客户端引擎资源" -#: ../../source/how-to-run-simulations.rst:75 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:190 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -"``ray_init_args`` input argument to ``start_simulation`` which the VCE " -"internally passes to Ray's ``ray.init`` command. For a complete list of " -"settings you can configure check the `ray.init " -"`_" -" documentation. Do not set ``ray_init_args`` if you want the VCE to use " -"all your system's CPUs and GPUs." 
-msgstr "" -"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " -"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " -":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" -" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " -"GPU,请不要设置 :code:`ray_init_args`。" - -#: ../../source/how-to-run-simulations.rst:97 -msgid "Assigning client resources" -msgstr "分配客户端资源" +"A generalization of this is given by the following equation. It gives the" +" maximum number of ``ClientApps`` that can be executed in parallel on " +"available CPU cores (SYS_CPUS) and VRAM (SYS_GPUS)." +msgstr "" -#: ../../source/how-to-run-simulations.rst:99 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:194 msgid "" -"By default the ``VirtualClientEngine`` assigns a single CPU core (and " -"nothing else) to each virtual client. This means that if your system has " -"10 cores, that many virtual clients can be concurrently running." +"N = \\min\\left(\\left\\lfloor \\frac{\\text{SYS_CPUS}}{\\text{num_cpus}}" +" \\right\\rfloor, \\left\\lfloor " +"\\frac{\\text{SYS_GPUS}}{\\text{num_gpus}} \\right\\rfloor\\right)" msgstr "" -"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " -"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" -#: ../../source/how-to-run-simulations.rst:103 +#: ../../source/how-to-run-simulations.rst:198 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"Both ``num_cpus`` (an integer higher than 1) and ``num_gpus`` (a non-" +"negative real number) should be set on a per ``ClientApp`` basis. 
If, for" +" example, you want only a single ``ClientApp`` to run on each GPU, then " +"set ``num_gpus=1.0``. If, for example, a ``ClientApp`` requires access to" +" two whole GPUs, you'd set ``num_gpus=2``." msgstr "" -"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " -"`client_resources` 设置为 `start_simulation `_ 。Ray " -"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" - -#: ../../source/how-to-run-simulations.rst:110 -#, fuzzy -msgid "``num_cpus`` indicates the number of CPU cores a client would get." -msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" - -#: ../../source/how-to-run-simulations.rst:111 -#, fuzzy -msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." -msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" -#: ../../source/how-to-run-simulations.rst:113 -msgid "Let's see a few examples:" -msgstr "让我们来看几个例子:" - -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:203 #, fuzzy msgid "" -"While the ``client_resources`` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " +"While the ``options.backend.client-resources`` can be used to control the" +" degree of concurrency in your simulations, this does not stop you from " +"running hundreds or even thousands of clients in the same round and " +"having orders of magnitude more *dormant* (i.e., not participating in a " "round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " -"client sampled by the strategy) and then will execute them in a resource-" -"aware manner in batches of 8." +" system can only accommodate 8 clients concurrently. 
The ``Simulation " +"Engine`` will schedule 100 ``ClientApps`` to run and then will execute " +"them in a resource-aware manner in batches of 8." msgstr "" "虽然 :code:`client_resources` 可用来控制 FL " "模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " @@ -7484,320 +6966,324 @@ msgstr "" "个客户端。:code:`VirtualClientEngine` 将安排运行 100 " "个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" -#: ../../source/how-to-run-simulations.rst:140 +#: ../../source/how-to-run-simulations.rst:212 +#, fuzzy +msgid "Simulation Engine resources" +msgstr "虚拟客户端引擎资源" + +#: ../../source/how-to-run-simulations.rst:214 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +"By default, the ``Simulation Engine`` has **access to all system " +"resources** (i.e., all CPUs, all GPUs). However, in some settings, you " +"might want to limit how many of your system resources are used for " +"simulation. You can do this in the ``pyproject.toml`` of your app by " +"setting the ``options.backend.init_args`` variable." msgstr "" -"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " -"`_。" -#: ../../source/how-to-run-simulations.rst:145 -msgid "Simulation examples" -msgstr "模拟示例" +#: ../../source/how-to-run-simulations.rst:228 +msgid "" +"With the above setup, the Backend will be initialized with a single CPU " +"and GPU. Therefore, even if more CPUs and GPUs are available in your " +"system, they will not be used for the simulation. The example above " +"results in a single ``ClientApp`` running at any given point." +msgstr "" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:233 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. 
You can run them on Google Colab too:" +"For a complete list of settings you can configure, check the `ray.init " +"`_" +" documentation." msgstr "" -"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " -"`_ 中提供。您也可以在 Google Colab 上运行它们:" -#: ../../source/how-to-run-simulations.rst:151 +#: ../../source/how-to-run-simulations.rst:236 +msgid "For the highest performance, do not set ``options.backend.init_args``." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:239 +#, fuzzy +msgid "Simulation in Colab/Jupyter" +msgstr "运行模拟" + +#: ../../source/how-to-run-simulations.rst:241 +msgid "" +"The preferred way of running simulations should always be " +"|flwr_run_link|_. However, the core functionality of the ``Simulation " +"Engine`` can be used from within a Google Colab or Jupyter environment by" +" means of `run_simulation `_." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:262 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +"With ``run_simulation``, you can also control the amount of resources for" +" your ``ClientApp`` instances. Do so by setting ``backend_config``. If " +"unset, the default resources are assigned (i.e., 2xCPUs per ``ClientApp``" +" and no GPU)." msgstr "" -"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" -#: ../../source/how-to-run-simulations.rst:154 +#: ../../source/how-to-run-simulations.rst:273 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"Refer to the `30 minutes Federated AI Tutorial " +"`_ for a complete example on how to " +"run Flower Simulations in Colab." 
msgstr "" -"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" -#: ../../source/how-to-run-simulations.rst:159 +#: ../../source/how-to-run-simulations.rst:280 msgid "Multi-node Flower simulations" msgstr "多节点 Flower 模拟" -#: ../../source/how-to-run-simulations.rst:161 +#: ../../source/how-to-run-simulations.rst:282 #, fuzzy msgid "" -"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " -"multiple compute nodes. Before starting your multi-node simulation ensure" -" that you:" +"Flower's ``Simulation Engine`` allows you to run FL simulations across " +"multiple compute nodes so that you're not restricted to running " +"simulations on a _single_ machine. Before starting your multi-node " +"simulation, ensure that you:" msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" -#: ../../source/how-to-run-simulations.rst:164 -msgid "Have the same Python environment in all nodes." +#: ../../source/how-to-run-simulations.rst:286 +#, fuzzy +msgid "Have the same Python environment on all nodes." msgstr "所有节点都有相同的 Python 环境。" -#: ../../source/how-to-run-simulations.rst:165 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#: ../../source/how-to-run-simulations.rst:287 +#, fuzzy +msgid "Have a copy of your code on all nodes." msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" -#: ../../source/how-to-run-simulations.rst:166 +#: ../../source/how-to-run-simulations.rst:288 msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" -msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" - -#: ../../source/how-to-run-simulations.rst:168 -#, fuzzy -msgid "" -"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " -"``VirtualClientEngine`` attaches to a running Ray instance." +"Have a copy of your dataset on all nodes. If you are using partitions " +"from `Flower Datasets `_, ensure the " +"partitioning strategy its parameterization are the same. 
The expectation " +"is that the i-th dataset partition is identical in all nodes." msgstr "" -"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " -":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" -#: ../../source/how-to-run-simulations.rst:171 +#: ../../source/how-to-run-simulations.rst:292 #, fuzzy msgid "" -"Start Ray on you head node: on the terminal type ``ray start --head``. " +"Start Ray on your head node: on the terminal, type ``ray start --head``. " "This command will print a few lines, one of which indicates how to attach" " other nodes to the head node." msgstr "" "在头部节点上启动 Ray:在终端上输入 :code:`raystart--" "head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" -#: ../../source/how-to-run-simulations.rst:174 +#: ../../source/how-to-run-simulations.rst:295 #, fuzzy msgid "" "Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -"``ray start --address='192.168.1.132:6379'``" +"starting the head and execute it on the terminal of a new node (before " +"executing |flwr_run_link|_). For example: ``ray start " +"--address='192.168.1.132:6379'``. Note that to be able to attach nodes to" +" the head node they should be discoverable by each other." msgstr "" "将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " "--address='192.168.1.132:6379'`" -#: ../../source/how-to-run-simulations.rst:178 +#: ../../source/how-to-run-simulations.rst:300 +#, fuzzy msgid "" "With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"would if the simulation were running on a single node. 
In other words:" msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" -#: ../../source/how-to-run-simulations.rst:181 +#: ../../source/how-to-run-simulations.rst:308 #, fuzzy msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command ``ray stop`` in each node's terminal " -"(including the head node)." +"Once your simulation is finished, if you'd like to dismantle your " +"cluster, you simply need to run the command ``ray stop`` in each node's " +"terminal (including the head node)." msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" -#: ../../source/how-to-run-simulations.rst:185 -msgid "Multi-node simulation good-to-know" -msgstr "了解多节点模拟" - -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" -msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" - -#: ../../source/how-to-run-simulations.rst:189 -#, fuzzy -msgid "" -"User ``ray status`` to check all nodes connected to your head node as " -"well as the total resources available to the ``VirtualClientEngine``." -msgstr "" -"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " -"可用的总资源。" - -#: ../../source/how-to-run-simulations.rst:192 +#: ../../source/how-to-run-simulations.rst:313 #, fuzzy msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -"``VirtualClientEngine`` can schedule as many `virtual` clients as that " -"node can possible run. In some settings you might want to exclude certain" -" resources from the simulation. You can do this by appending `--num-" -"cpus=` and/or `--num-gpus=` in " -"any ``ray start`` command (including when starting the head)" +"When attaching a new node to the head, all its resources (i.e., all CPUs," +" all GPUs) will be visible by the head node. 
This means that the " +"``Simulation Engine`` can schedule as many ``ClientApp`` instances as " +"that node can possibly run. In some settings, you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"``--num-cpus=`` and/or ``--num-" +"gpus=`` in any ``ray start`` command (including when " +"starting the head)." msgstr "" "将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" " 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" " start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" "gpus=`" -#: ../../source/how-to-run-simulations.rst:202 -msgid "Considerations for simulations" -msgstr "模拟的注意事项" +#: ../../source/how-to-run-simulations.rst:322 +#, fuzzy +msgid "FAQ for Simulations" +msgstr "运行模拟" + +#: ../../source/how-to-run-simulations.rst +msgid "Can I make my ``ClientApp`` instances stateful?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:206 +#: ../../source/how-to-run-simulations.rst:326 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." -msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" +"Yes. Use the ``state`` attribute of the |context_link|_ object that is " +"passed to the ``ClientApp`` to save variables, parameters, or results to " +"it. Read the `Designing Stateful Clients `_ guide for a complete walkthrough." +msgstr "" + +#: ../../source/how-to-run-simulations.rst +msgid "Can I run multiple simulations on the same machine?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:209 +#: ../../source/how-to-run-simulations.rst:330 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. 
While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"Yes, but bear in mind that each simulation isn't aware of the resource " +"usage of the other. If your simulations make use of GPUs, consider " +"setting the ``CUDA_VISIBLE_DEVICES`` environment variable to make each " +"simulation use a different set of the available GPUs. Export such an " +"environment variable before starting |flwr_run_link|_." msgstr "" -"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" -" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " -"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" -#: ../../source/how-to-run-simulations.rst:217 -msgid "GPU resources" -msgstr "GPU 资源" +#: ../../source/how-to-run-simulations.rst +msgid "" +"Do the CPU/GPU resources set for each ``ClientApp`` restrict how much " +"compute/memory these make use of?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:219 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:334 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -"``num_gpus`` in ``client_resources``. This being said, Ray (used " -"internally by the VCE) is by default:" +"No. These resources are exclusively used by the simulation backend to " +"control how many workers can be created on startup. Let's say N backend " +"workers are launched, then at most N ``ClientApp`` instances will be " +"running in parallel. It is your responsibility to ensure ``ClientApp`` " +"instances have enough resources to execute their workload (e.g., fine-" +"tune a transformer model)." 
msgstr "" -"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " -"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" -#: ../../source/how-to-run-simulations.rst:222 -#, fuzzy +#: ../../source/how-to-run-simulations.rst +msgid "My ``ClientApp`` is triggering OOM on my GPU. What should I do?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:338 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set ``num_gpus=0.5`` and you have two GPUs in your system with different" -" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " -"concurrently." +"It is likely that your `num_gpus` setting, which controls the number of " +"``ClientApp`` instances that can share a GPU, is too low (meaning too " +"many ``ClientApps`` share the same GPU). Try the following:" msgstr "" -"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " -"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" -#: ../../source/how-to-run-simulations.rst:225 +#: ../../source/how-to-run-simulations.rst:340 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" -msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" +"Set your ``num_gpus=1``. This will make a single ``ClientApp`` run on a " +"GPU." +msgstr "" -#: ../../source/how-to-run-simulations.rst:228 +#: ../../source/how-to-run-simulations.rst:341 +msgid "Inspect how much VRAM is being used (use ``nvidia-smi`` for this)." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:342 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"Based on the VRAM you see your single ``ClientApp`` using, calculate how " +"many more would fit within the remaining VRAM. One divided by the total " +"number of ``ClientApps`` is the ``num_gpus`` value you should set." 
msgstr "" -"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" -#: ../../source/how-to-run-simulations.rst:231 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:344 +msgid "Refer to :ref:`clientappresources` for more details." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:346 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." +"If your ``ClientApp`` is using TensorFlow, make sure you are exporting " +"``TF_FORCE_GPU_ALLOW_GROWTH=\"1\"`` before starting your simulation. For " +"more details, check." msgstr "" -"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " -":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" -#: ../../source/how-to-run-simulations.rst:235 -#, fuzzy +#: ../../source/how-to-run-simulations.rst msgid "" -"In addition, the GPU resource limits passed to ``client_resources`` are " -"not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"How do I know what's the right ``num_cpus`` and ``num_gpus`` for my " +"``ClientApp``?" msgstr "" -"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" -" VRAM 超过启动模拟时指定的比例。" -#: ../../source/how-to-run-simulations.rst:240 -msgid "TensorFlow with GPUs" -msgstr "使用 GPU 的 TensorFlow" +#: ../../source/how-to-run-simulations.rst:350 +msgid "" +"A good practice is to start by running the simulation for a few rounds " +"with higher ``num_cpus`` and ``num_gpus`` than what is really needed " +"(e.g., ``num_cpus=8`` and, if you have a GPU, ``num_gpus=1``). Then " +"monitor your CPU and GPU utilization. For this, you can make use of tools" +" such as ``htop`` and ``nvidia-smi``. 
If you see overall resource " +"utilization remains low, try lowering ``num_cpus`` and ``num_gpus`` " +"(recall this will make more ``ClientApp`` instances run in parallel) " +"until you see a satisfactory system resource utilization." +msgstr "" -#: ../../source/how-to-run-simulations.rst:242 +#: ../../source/how-to-run-simulations.rst:352 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"Note that if the workload on your ``ClientApp`` instances is not " +"homogeneous (i.e., some come with a larger compute or memory footprint), " +"you'd probably want to focus on those when coming up with a good value " +"for ``num_gpus`` and ``num_cpus``." msgstr "" -"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" -" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " -"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " -"`_来禁用这一默认行为。" -#: ../../source/how-to-run-simulations.rst:249 -#, fuzzy +#: ../../source/how-to-run-simulations.rst +msgid "Can I assign different resources to each ``ClientApp`` instance?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:356 +msgid "" +"No. All ``ClientApp`` objects are assumed to make use of the same " +"``num_cpus`` and ``num_gpus``. When setting these values (refer to " +":ref:`clientappresources` for more details), ensure the ``ClientApp`` " +"with the largest memory footprint (either RAM or VRAM) can run in your " +"system with others like it in parallel." +msgstr "" + +#: ../../source/how-to-run-simulations.rst msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. 
By means of " -"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " -"order to specify a function to be executed upon actor initialization. In " -"this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"Can I run single simulation accross multiple compute nodes (e.g. GPU " +"servers)?" msgstr "" -"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " -":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" -" TF 工作负载的 GPU 增长,它看起来如下:" -#: ../../source/how-to-run-simulations.rst:272 +#: ../../source/how-to-run-simulations.rst:360 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"Yes. If you are using the ``RayBackend`` (the *default* backend) you can " +"first interconnect your nodes through Ray's cli and then launch the " +"simulation. Refer to :ref:`multinodesimulations` for a step-by-step " +"guide." msgstr "" -"这正是 \"Tensorflow/Keras 模拟 " -"`_\"示例中使用的机制。" -#: ../../source/how-to-run-simulations.rst:276 -msgid "Multi-node setups" -msgstr "多节点设置" +#: ../../source/how-to-run-simulations.rst +msgid "" +"My ``ServerApp`` also needs to make use of the GPU (e.g., to do " +"evaluation of the *global model* after aggregation). Is this GPU usage " +"taken into account by the ``Simulation Engine``?" +msgstr "" -#: ../../source/how-to-run-simulations.rst:278 +#: ../../source/how-to-run-simulations.rst:364 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. 
Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"No. The ``Simulation Engine`` only manages ``ClientApps`` and therefore " +"is only aware of the system resources they require. If your ``ServerApp``" +" makes use of substantial compute or memory resources, factor that into " +"account when setting ``num_cpus`` and ``num_gpus``." msgstr "" -"VCE 目前不提供控制特定 \"虚拟 " -"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " -"FL " -"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" -" nfs 或数据库)来避免数据重复。" -#: ../../source/how-to-run-simulations.rst:286 +#: ../../source/how-to-run-simulations.rst +msgid "" +"Can I indicate on what resource a specific instance of a ``ClientApp`` " +"should run? Can I do resource placement?" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:368 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Currently, the placement of ``ClientApp`` instances is managed by the " +"``RayBackend`` (the only backend available as of ``flwr==1.13.0``) and " +"cannot be customized. Implementing a *custom* backend would be a way of " +"achieving resource placement." 
msgstr "" -"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " -"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" -" \"状态\"。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" +#, fuzzy +msgid "Save and Load Model Checkpoints" msgstr "保存和加载模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:4 @@ -7808,7 +7294,8 @@ msgid "" msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" +#, fuzzy +msgid "Model Checkpointing" msgstr "模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 @@ -7830,11 +7317,12 @@ msgstr "" "策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " ":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 -msgid "Save and load PyTorch checkpoints" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:58 +#, fuzzy +msgid "Save and Load PyTorch Checkpoints" msgstr "保存和加载 PyTorch 检查点" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:60 msgid "" "Similar to the previous example but with a few extra steps, we'll show " "how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " @@ -7847,14 +7335,14 @@ msgstr "" "函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " "``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:103 msgid "" "To load your progress, you simply append the following lines to your " "code. 
Note that this will iterate over all saved checkpoints and load the" " latest one:" msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:116 #, fuzzy msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" @@ -7863,11 +7351,32 @@ msgstr "" "在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " "``initial_parameters` 中。" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:119 +msgid "" +"Alternatively, we can save and load the model updates during evaluation " +"phase by overriding ``evaluate()`` or ``aggregate_evaluate()`` method of " +"the strategy (``FedAvg``). Checkout the details in `Advanced PyTorch " +"Example `_ and `Advanced TensorFlow Example " +"`_." +msgstr "" + #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" msgstr "升级至 Flower 1.0" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:6 +msgid "" +"This guide is for users who have already worked with Flower 0.x and want " +"to upgrade to Flower 1.0. Newer versions of Flower (1.13 and later) are " +"based on a new architecture and not covered in this guide. After " +"upgrading Flower 0.x projects to Flower 1.0, please refer to " +":doc:`Upgrade to Flower 1.13 ` to make " +"your project compatible with the lastest version of Flower." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:13 msgid "" "Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" " foundation for future growth. 
Compared to Flower 0.19 (and other 0.x " @@ -7877,34 +7386,34 @@ msgstr "" "Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " "系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 -#: ../../source/how-to-upgrade-to-flower-next.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:49 msgid "Install update" msgstr "安装更新" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:21 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "pip: add ``-U`` when installing." msgstr "pip: 安装时添加 ``-U``." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:25 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " @@ -7913,11 +7422,11 @@ msgstr "" "Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" " 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:32 msgid "``flwr = \"^1.0.0\"`` 
(when using ``start_server`` and ``start_client``)" msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" @@ -7925,32 +7434,32 @@ msgstr "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " "}``(当使用``start_simulation``时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 -#: ../../source/how-to-upgrade-to-flower-next.rst:121 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:88 msgid "Required changes" msgstr "所需变更" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "The following breaking changes require manual updates." msgstr "以下更改需要手动更新。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:42 msgid "General" msgstr "一般情况" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:44 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). 
" "Here's an example:" msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " @@ -7959,12 +7468,12 @@ msgstr "" "Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "客户端" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" @@ -7972,7 +7481,7 @@ msgstr "" "NumPyClient的子类:将``def get_parameters(self):```改为``def " "get_parameters(self,config):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" @@ -7980,11 +7489,11 @@ msgstr "" "客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " "GetParametersIns):\"" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "策略 / ``start_server`` / ``start_simulation``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) to 
``start_server`` and " "``start_simulation``. Here's an example:" @@ -7992,7 +7501,7 @@ msgstr "" "向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " "dictionary)。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" @@ -8000,7 +7509,7 @@ msgstr "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " @@ -8010,13 +7519,13 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:70 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:72 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. 
Distributed evaluation on all clients can be enabled by" @@ -8026,19 +7535,19 @@ msgstr "" "删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " "参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:75 msgid "Rename parameter/ndarray conversion functions:" msgstr "重命名参数/数组转换函数:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:78 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -8052,23 +7561,23 @@ msgstr "" "``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " "`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "重命名内置策略参数(例如,`FedAvg``):" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:92 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " @@ -8078,11 +7587,11 @@ msgstr "" "``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" " 和 ``evaluate_fn``。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:95 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:97 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8090,7 +7599,7 @@ msgstr "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:99 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " @@ -8100,11 +7609,11 @@ msgstr "" "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "Custom strategies" msgstr "定制策略" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -8117,13 +7626,13 @@ msgstr "" "BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " "EvaluateRes], 
BaseException]]``(在``agregate_evaluate``中)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:112 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8131,7 +7640,7 @@ msgstr "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:```" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8139,17 +7648,17 @@ msgstr "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:118 msgid "Optional improvements" msgstr "可选的改进措施" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:120 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. 
If you, for example, use server-side evaluation, then " @@ -8159,7 +7668,7 @@ msgstr "" "删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " "\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:126 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " @@ -8169,12 +7678,12 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 -#: ../../source/how-to-upgrade-to-flower-next.rst:349 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:130 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:451 msgid "Further help" msgstr "更多帮助" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:132 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -8186,97 +7695,73 @@ msgstr "" " 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " "`_ 并使用 \"#questions``\"。" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:2 #, fuzzy -msgid "Upgrade to Flower Next" +msgid "Upgrade to Flower 1.13" msgstr "升级至 Flower 1.0" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:4 #, fuzzy msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " +"Welcome to the migration guide for updating Flower to Flower 1.13! " "Whether you're a seasoned user or just getting started, this guide will " "help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +" latest features and improvements in Flower 1.13." 
msgstr "" "欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " "Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" -#: ../../source/how-to-upgrade-to-flower-next.rst:11 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.13.rst:10 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"This guide shows how to make pre-``1.13`` Flower code compatible with " +"Flower 1.13 (and later) with only minimal code changes." msgstr "" -"本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " -"代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" -#: ../../source/how-to-upgrade-to-flower-next.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:13 #, fuzzy msgid "Let's dive in!" msgstr "让我们深入了解一下!" -#: ../../source/how-to-upgrade-to-flower-next.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:51 #, fuzzy msgid "" -"Here's how to update an existing installation of Flower to Flower Next " +"Here's how to update an existing installation of Flower to Flower 1.13 " "with ``pip``:" msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/how-to-upgrade-to-flower-next.rst:74 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:57 #, fuzzy -msgid "or if you need Flower Next with simulation:" +msgid "or if you need Flower 1.13 with simulation:" msgstr "启动 Flower 模拟" -#: ../../source/how-to-upgrade-to-flower-next.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:63 #, fuzzy msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" -#: ../../source/how-to-upgrade-to-flower-next.rst:90 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:73 #, fuzzy msgid "or ``pyproject.toml``:" msgstr "或 ``pyproject.toml```:" -#: 
../../source/how-to-upgrade-to-flower-next.rst:101 -#, fuzzy -msgid "Using Poetry" -msgstr "使用 pip" - -#: ../../source/how-to-upgrade-to-flower-next.rst:103 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.13.rst:90 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"Starting with Flower 1.8, the *infrastructure* and *application layers* " +"have been decoupled. Flower 1.13 enforces this separation further. Among " +"other things, this allows you to run the exact same code in a simulation " +"as in a real deployment." msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" - -#: ../../source/how-to-upgrade-to-flower-next.rst:106 -#, fuzzy -msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/how-to-upgrade-to-flower-next.rst:123 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:94 #, fuzzy msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"Instead of starting a client in code via ``start_client()``, you create a" +" |clientapp_link|_. Instead of starting a server in code via " +"``start_server()``, you create a |serverapp_link|_. 
Both ``ClientApp`` " +"and ``ServerApp`` are started by the long-running components of the " +"server and client: the `SuperLink` and `SuperNode`, respectively." msgstr "" "在 Flower Next " "中,*基础架构层*和*应用层*已经解耦。你不再需要在代码中通过``start_client()``启动客户端,而是创建一个|clientapp_link|_,然后通过命令行启动它。无需通过``start_server()``在代码中启动服务器,而是创建一个" @@ -8284,141 +7769,216 @@ msgstr "" "并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" " Flower Next 方式运行项目:" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:102 +msgid "" +"For more details on SuperLink and SuperNode, please see the " +"|flower_architecture_link|_ ." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:105 +msgid "" +"The following non-breaking changes require manual updates and allow you " +"to run your project both in the traditional (now deprecated) way and in " +"the new (recommended) Flower 1.13 way:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:110 #, fuzzy msgid "|clientapp_link|_" msgstr "客户端" -#: ../../source/how-to-upgrade-to-flower-next.rst:134 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:112 #, fuzzy msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"via ``start_client()``. Here's an example:" msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:146 #, fuzzy msgid "|serverapp_link|_" msgstr "服务器" -#: ../../source/how-to-upgrade-to-flower-next.rst:159 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:148 #, fuzzy msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"the server via ``start_server()``. 
Here's an example:" msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:180 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:185 #, fuzzy msgid "Deployment" msgstr "调配" -#: ../../source/how-to-upgrade-to-flower-next.rst:182 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:187 #, fuzzy msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"In a terminal window, start the SuperLink using |flower_superlink_link|_." +" Then, in two additional terminal windows, start two SuperNodes using " +"|flower_supernode_link|_ (2x). There is no need to directly run " +"``client.py`` and ``server.py`` as Python scripts." msgstr "" "在依次运行 |flowernext_clientapp_link|_ (2x) 和 |flowernext_serverapp_link|_ " "之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " "`server.py` 作为 Python 脚本执行。" -#: ../../source/how-to-upgrade-to-flower-next.rst:185 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:190 #, fuzzy msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"Here's an example to start the server without HTTPS (insecure mode, only " +"for prototyping):" msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:195 +msgid "" +"For a comprehensive walk-through on how to deploy Flower using Docker, " +"please refer to the :doc:`docker/index` guide." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:218 #, fuzzy msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." 
+"Here's another example to start both SuperLink and SuperNodes with HTTPS." +" Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile``" +" command line options to pass paths to (CA certificate, server " +"certificate, and server private key)." msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:229 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:246 #, fuzzy -msgid "Simulation in CLI" +msgid "Simulation (CLI)" msgstr "运行模拟" -#: ../../source/how-to-upgrade-to-flower-next.rst:231 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:248 #, fuzzy msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"|serverapp_link|_, respectively. There is no need to use " +"``start_simulation()`` anymore. Here's an example:" msgstr "" "分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " "|startsim_link|_。下面是一个示例:" -#: ../../source/how-to-upgrade-to-flower-next.rst:264 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:253 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:389 +msgid "" +"For a comprehensive guide on how to setup and run Flower simulations " +"please read the |flower_how_to_run_simulations_link|_ guide." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:310 +msgid "Depending on your Flower version, you can run your simulation as follows:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:312 +msgid "" +"For Flower 1.11 and later, run ``flwr run`` in the terminal. This is the " +"recommended way to start simulations, other ways are deprecated and no " +"longer recommended." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:314 #, fuzzy msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"DEPRECATED For Flower versions between 1.8 and 1.10, run ``flower-" +"simulation`` in the terminal and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +" In the code snippet below, there is an example (assuming the " +"``server_app`` and ``client_app`` objects are in a ``sim.py`` module)." msgstr "" "在 CLI 中运行 |flower_simulation_link|_ 并指向代码中的 ``server_app`` " "/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " "`client_app`` 对象位于 `sim.py`` 模块中):" -#: ../../source/how-to-upgrade-to-flower-next.rst:281 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.13.rst:318 +msgid "DEPRECATED For Flower versions before 1.8, run the Python script directly." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:337 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"Depending on your Flower version, you can also define the default " +"resources as follows:" msgstr "" -"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " -"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:305 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:339 +msgid "" +"For Flower 1.11 and later, you can edit your ``pyproject.toml`` file and " +"then run ``flwr run`` in the terminal as shown in the example below." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:341 +#, fuzzy +msgid "" +"DEPRECATED For Flower versions between 1.8 and 1.10, you can adjust the " +"resources for each |clientapp_link|_ using the ``--backend-config`` " +"command line argument instead of setting the ``client_resources`` " +"argument in ``start_simulation()``." +msgstr "" +"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " +"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:344 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:384 +msgid "" +"DEPRECATED For Flower versions before 1.8, you need to run " +"``start_simulation()`` and pass a dictionary of the required resources to" +" the ``client_resources`` argument." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:375 #, fuzzy -msgid "Simulation in a Notebook" +msgid "Simulation (Notebook)" msgstr "笔记本中的模拟" -#: ../../source/how-to-upgrade-to-flower-next.rst:307 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:377 +msgid "" +"To run your simulation from within a notebook, please consider the " +"following examples depending on your Flower version:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:380 #, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"For Flower 1.11 and later, you need to run |runsim_link|_ in your " +"notebook instead of ``start_simulation()``." msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:351 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:382 +msgid "" +"DEPRECATED For Flower versions between 1.8 and 1.10, you need to run " +"|runsim_link|_ in your notebook instead of ``start_simulation()`` and " +"configure the resources." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.13.rst:453 #, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"Most official `Flower code examples `_ " +"are already updated to Flower 1.13 so they can serve as a reference for " +"using the Flower 1.13 API. If there are further questions, `join the " +"Flower Slack `_ (and use the channel " +"``#questions``) or post them on `Flower Discuss " +"`_ where you can find the community posting " +"and answering questions." msgstr "" "大多数官方的 `Flower 代码示例 `_" " 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " "`_ 并使用 \"#questions``\"。" -#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:460 #, fuzzy msgid "Important" msgstr "重要变更:" -#: ../../source/how-to-upgrade-to-flower-next.rst:360 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:462 #, fuzzy msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"As we continuously enhance Flower at a rapid pace, we'll be periodically " +"updating this guide. Please feel free to share any feedback with us!" msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/how-to-upgrade-to-flower-next.rst:366 +#: ../../source/how-to-upgrade-to-flower-1.13.rst:465 #, fuzzy msgid "Happy migrating! 
🚀" msgstr "移民愉快!🚀" @@ -8714,7 +8274,7 @@ msgstr "" "除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " "以执行客户端剪切:" -#: ../../source/how-to-use-differential-privacy.rst:115 +#: ../../source/how-to-use-differential-privacy.rst:116 #, fuzzy msgid "" "To utilize local differential privacy (DP) and add noise to the client " @@ -8730,12 +8290,12 @@ msgstr "" msgid "local DP mod" msgstr "本地 DP 模式" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:126 #, fuzzy msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" -#: ../../source/how-to-use-differential-privacy.rst:140 +#: ../../source/how-to-use-differential-privacy.rst:144 #, fuzzy msgid "" "Please note that the order of mods, especially those that modify " @@ -8744,12 +8304,12 @@ msgid "" "parameters." msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" -#: ../../source/how-to-use-differential-privacy.rst:145 +#: ../../source/how-to-use-differential-privacy.rst:149 #, fuzzy msgid "Local Training using Privacy Engines" msgstr "使用隐私引擎进行本地培训" -#: ../../source/how-to-use-differential-privacy.rst:147 +#: ../../source/how-to-use-differential-privacy.rst:151 #, fuzzy msgid "" "For ensuring data instance-level privacy during local model training on " @@ -8757,8 +8317,8 @@ msgid "" "TensorFlow Privacy. For examples of using Flower with these engines, " "please refer to the Flower examples directory (`Opacus " "`_, `Tensorflow" -" Privacy `_)." +" Privacy `_)." 
msgstr "" "要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" " 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " @@ -8790,12 +8350,12 @@ msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "使用现有策略,例如 :code:`FedAvg`" #: ../../source/how-to-use-strategies.rst:11 -#: ../../source/how-to-use-strategies.rst:43 +#: ../../source/how-to-use-strategies.rst:66 msgid "Customize an existing strategy with callback functions" msgstr "使用回调函数定制现有策略" #: ../../source/how-to-use-strategies.rst:12 -#: ../../source/how-to-use-strategies.rst:99 +#: ../../source/how-to-use-strategies.rst:139 msgid "Implement a novel strategy" msgstr "实施新策略" @@ -8804,37 +8364,49 @@ msgid "Use an existing strategy" msgstr "使用现有策略" #: ../../source/how-to-use-strategies.rst:17 +#, fuzzy msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Flower comes with a number of popular federated learning Strategies which" +" can be instantiated as follows:" msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" -#: ../../source/how-to-use-strategies.rst:27 -#, fuzzy +#: ../../source/how-to-use-strategies.rst:45 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the ``start_server`` function. It is usually recommended" -" to adjust a few parameters during instantiation:" -msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" +"To make the ``ServerApp`` use this strategy, pass a ``server_fn`` " +"function to the ``ServerApp`` constructor. The ``server_fn`` function " +"should return a ``ServerAppComponents`` object that contains the strategy" +" instance and a ``ServerConfig`` instance." +msgstr "" -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:50 msgid "" -"Existing strategies provide several ways to customize their behaviour. 
" +"Both ``Strategy`` and ``ServerConfig`` classes can be configured with " +"parameters. The ``Context`` object passed to ``server_fn`` contains the " +"values specified in the ``[tool.flwr.app.config]`` table in your " +"``pyproject.toml`` (a snippet is shown below). To access these values, " +"use ``context.run_config``." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:68 +#, fuzzy +msgid "" +"Existing strategies provide several ways to customize their behavior. " "Callback functions allow strategies to call user-provided code during " -"execution." +"execution. This approach enables you to modify the strategy's partial " +"behavior without rewriting the whole class from zero." msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" -#: ../../source/how-to-use-strategies.rst:49 +#: ../../source/how-to-use-strategies.rst:73 msgid "Configuring client fit and client evaluate" msgstr "配置客户匹配和客户评估" -#: ../../source/how-to-use-strategies.rst:51 +#: ../../source/how-to-use-strategies.rst:75 #, fuzzy msgid "" "The server can pass new configuration values to the client each round by " "providing a function to ``on_fit_config_fn``. The provided function will " "be called by the strategy and must return a dictionary of configuration " -"key values pairs that will be sent to the client. It must return a " +"key value pairs that will be sent to the client. It must return a " "dictionary of arbitrary configuration values ``client.fit`` and " "``client.evaluate`` functions during each round of federated learning." 
msgstr "" @@ -8842,19 +8414,22 @@ msgstr "" "提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" " dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" -#: ../../source/how-to-use-strategies.rst:84 +#: ../../source/how-to-use-strategies.rst:121 #, fuzzy msgid "" "The ``on_fit_config_fn`` can be used to pass arbitrary configuration " -"values from server to client, and potentially change these values each " +"values from server to client and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " "the dictionary returned by the ``on_fit_config_fn`` in its own " -"``client.fit()`` function." +"``client.fit()`` function. And while the values can be also passed " +"directly via the context this function can be a place to implement finer " +"control over the `fit` behaviour that may not be achieved by the context," +" which sets fixed values." msgstr "" ":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " ":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:129 #, fuzzy msgid "" "Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " @@ -8863,18 +8438,18 @@ msgstr "" "与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " ":code:`client.evaluate()` 的配置" -#: ../../source/how-to-use-strategies.rst:93 +#: ../../source/how-to-use-strategies.rst:133 msgid "Configuring server-side evaluation" msgstr "配置服务器端评估" -#: ../../source/how-to-use-strategies.rst:95 +#: ../../source/how-to-use-strategies.rst:135 #, fuzzy msgid "" "Server-side evaluation can be enabled by passing an evaluation function " "to ``evaluate_fn``." 
msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" -#: ../../source/how-to-use-strategies.rst:101 +#: ../../source/how-to-use-strategies.rst:141 msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " "the most flexibility. Read the `Implementing Strategies `_ is a " "friendly federated learning framework." @@ -9019,35 +8591,35 @@ msgid "" "specific goal." msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/index.rst:116 +#: ../../source/index.rst:109 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: ../../source/index.rst:128 +#: ../../source/index.rst:121 msgid "References" msgstr "参考资料" -#: ../../source/index.rst:130 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "以信息为导向的 API 参考资料和其他参考资料。" -#: ../../source/index.rst:139::1 +#: ../../source/index.rst:132::1 #, fuzzy msgid ":py:obj:`flwr `\\" msgstr ":py:obj:`flwr `\\" -#: ../../source/index.rst:139::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of #, fuzzy msgid "Flower main package." msgstr "Flower 主包装。" -#: ../../source/index.rst:155 +#: ../../source/index.rst:148 msgid "Contributor docs" msgstr "贡献者文档" -#: ../../source/index.rst:157 +#: ../../source/index.rst:150 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." 
@@ -9057,9 +8629,14 @@ msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助 msgid "Flower CLI reference" msgstr "Flower CLI 参考" -#: ../../source/ref-api-cli.rst:7 +#: ../../source/ref-api-cli.rst:5 #, fuzzy -msgid "flwr CLI" +msgid "Basic Commands" +msgstr "命令示例" + +#: ../../source/ref-api-cli.rst:10 +#, fuzzy +msgid "``flwr`` CLI" msgstr "Flower 客户端" #: ../../flwr:1 @@ -9149,9 +8726,9 @@ msgstr "" #: ../../source/ref-api-cli.rst #, fuzzy msgid "Arguments" -msgstr "参数解析器" +msgstr "构建文档" -#: ../../flwr install:1 log:1 new:1 run:1 +#: ../../flwr install:1 log:1 ls:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "可选的改进措施" @@ -9168,7 +8745,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log run +#: ../../flwr log ls run #, fuzzy msgid "default" msgstr "工作流程" @@ -9195,10 +8772,36 @@ msgstr "" msgid "Name of the federation to run the app on" msgstr "" +#: ../../flwr ls:1 +msgid "List runs." +msgstr "" + +#: ../../flwr ls:1 +msgid "List all runs" +msgstr "" + +#: ../../flwr ls:1 run:1 +#, fuzzy +msgid "``False``" +msgstr "``FLWR_VERSION``" + +#: ../../flwr ls:1 +msgid "Specific run ID to display" +msgstr "" + +#: ../../flwr ls:1 +#, fuzzy +msgid "Path of the Flower project" +msgstr "基础镜像的存储库名称。" + +#: ../../flwr ls:1 +msgid "Name of the federation" +msgstr "" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." -msgstr "Flower 服务器。" +msgstr "创建新页面" #: ../../flwr new:1 msgid "The ML framework to use" @@ -9251,11 +8854,6 @@ msgid "" "default." msgstr "" -#: ../../flwr run:1 -#, fuzzy -msgid "``False``" -msgstr "``FLWR_VERSION``" - #: ../../flwr run:1 #, fuzzy msgid "Path of the Flower App to run." @@ -9265,39 +8863,68 @@ msgstr "基础镜像的存储库名称。" msgid "Name of the federation to run the app on." 
msgstr "" -#: ../../source/ref-api-cli.rst:16 +#: ../../source/ref-api-cli.rst:19 #, fuzzy -msgid "flower-simulation" -msgstr "运行模拟" - -#: ../../source/ref-api-cli.rst:26 -msgid "flower-superlink" +msgid "``flower-superlink``" msgstr "flower-superlink" -#: ../../source/ref-api-cli.rst:36 +#: ../../source/ref-api-cli.rst:29 #, fuzzy -msgid "flower-supernode" +msgid "``flower-supernode``" msgstr "Flower 服务器" -#: ../../source/ref-api-cli.rst:46 +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "Advanced Commands" +msgstr "高级安装选项" + +#: ../../source/ref-api-cli.rst:42 #, fuzzy -msgid "flower-server-app" +msgid "``flwr-serverapp``" msgstr "flower-driver-api" -#: ../../source/ref-api-cli.rst:50 +#: ../../source/ref-api-cli.rst:52 +#, fuzzy +msgid "``flwr-clientapp``" +msgstr "Flower 客户端。" + +#: ../../source/ref-api-cli.rst:60 +#, fuzzy +msgid "Technical Commands" +msgstr "命令示例" + +#: ../../source/ref-api-cli.rst:65 +#, fuzzy +msgid "``flower-simulation``" +msgstr "运行模拟" + +#: ../../source/ref-api-cli.rst:73 +#, fuzzy +msgid "Deprecated Commands" +msgstr "停用" + +#: ../../source/ref-api-cli.rst:78 +#, fuzzy +msgid "``flower-server-app``" +msgstr "flower-driver-api" + +#: ../../source/ref-api-cli.rst:82 msgid "" -"Note that since version ``1.11.0``, ``flower-server-app`` no longer " -"supports passing a reference to a `ServerApp` attribute. Instead, you " -"need to pass the path to Flower app via the argument ``--app``. This is " -"the path to a directory containing a `pyproject.toml`. You can create a " -"valid Flower app by executing ``flwr new`` and following the prompt." +"Note that from version ``1.13.0``, ``flower-server-app`` is deprecated. " +"Instead, you only need to execute |flwr_run_link|_ to start the run." 
msgstr "" -#: ../../source/ref-api-cli.rst:64 +#: ../../source/ref-api-cli.rst:88 #, fuzzy -msgid "flower-superexec" +msgid "``flower-superexec``" msgstr "flower-superlink" +#: ../../source/ref-api-cli.rst:92 +msgid "" +"Note that from version ``1.13.0``, ``flower-superexec`` is deprecated. " +"Instead, you only need to execute |flower_superlink_link|_." +msgstr "" + #: ../../source/ref-api/flwr.rst:2 #, fuzzy msgid "flwr" @@ -9395,6 +9022,7 @@ msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" #: ../../source/ref-api/flwr.server.rst:24 #: ../../source/ref-api/flwr.server.strategy.rst:17 #: ../../source/ref-api/flwr.server.workflow.rst:17 +#: ../../source/ref-api/flwr.simulation.rst:26 #, fuzzy msgid "Classes" msgstr "类别" @@ -9518,6 +9146,7 @@ msgstr "Bases: :py:class:`~abc.ABC`" #: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:15 #, fuzzy msgid "Methods" msgstr "方法" @@ -9628,7 +9257,7 @@ msgstr "返回客户端(本身)。" #: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.Driver.rst:43 #: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 @@ -9674,6 +9303,7 @@ msgstr "" #: flwr.server.driver.driver.Driver.pull_messages #: flwr.server.driver.driver.Driver.push_messages #: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.driver.driver.Driver.set_run #: flwr.server.serverapp_components.ServerAppComponents #: flwr.server.strategy.bulyan.Bulyan #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping @@ 
-9697,7 +9327,8 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.run_simulation.run_simulation of +#: flwr.simulation.run_simulation.run_simulation +#: flwr.simulation.simulationio_connection.SimulationIoConnection of msgid "Parameters" msgstr "参数" @@ -9715,6 +9346,7 @@ msgstr "评估指令包含从服务器接收的(全局)模型参数,以及 #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -9747,6 +9379,7 @@ msgstr "评估结果包含本地数据集上的损失值和其他详细信息, #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_error_reply #: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register @@ -9801,11 +9434,6 @@ msgstr "从服务器接收的获取属性指令包含配置值字典。" msgid "The current client properties." 
msgstr "当前客户端属性。" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -#, fuzzy -msgid "ClientApp" -msgstr "客户端" - #: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 #: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 #: flwr.common.context.Context:1 flwr.common.message.Error:1 @@ -9824,12 +9452,12 @@ msgstr "客户端" #: flwr.server.serverapp_components.ServerAppComponents:1 #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of #, fuzzy msgid "Bases: :py:class:`object`" msgstr "Bases: :py:class:`object`" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.app.start_client:51 flwr.client.app.start_numpy_client:36 #: flwr.client.client_app.ClientApp:4 #: flwr.client.client_app.ClientApp.evaluate:4 #: flwr.client.client_app.ClientApp.query:4 @@ -9838,7 +9466,7 @@ msgstr "Bases: :py:class:`object`" #: flwr.common.record.configsrecord.ConfigsRecord:20 #: flwr.common.record.metricsrecord.MetricsRecord:19 #: flwr.common.record.parametersrecord.ParametersRecord:22 -#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:46 #: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 @@ -10353,7 +9981,13 @@ msgstr "工作流程" msgid "start\\_client" msgstr "启动客户端" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: flwr.client.app.start_client:5 of +msgid "" +"This function is deprecated since 1.13.0. Use :code:`flower-supernode` " +"command instead to start a SuperNode." 
+msgstr "" + +#: flwr.client.app.start_client:8 flwr.client.app.start_numpy_client:9 of msgid "" "The IPv4 or IPv6 address of the server. If the Flower server runs on the " "same machine on port 8080, then `server_address` would be " @@ -10362,17 +9996,17 @@ msgstr "" "服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " "8080,则`server_address`应为`\"[::]:8080\"`。" -#: flwr.client.app.start_client:7 of +#: flwr.client.app.start_client:12 of msgid "A callable that instantiates a Client. (default: None)" msgstr "用于实例化客户端的可调用程序。(默认值:无)" -#: flwr.client.app.start_client:9 of +#: flwr.client.app.start_client:14 of msgid "" "An implementation of the abstract base class `flwr.client.Client` " "(default: None)" msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: flwr.client.app.start_client:17 flwr.client.app.start_numpy_client:15 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " server. The default should be sufficient for most models. Users who " @@ -10384,14 +10018,14 @@ msgstr "" "可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " "服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: flwr.client.app.start_client:24 flwr.client.app.start_numpy_client:22 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " "to an SSL-enabled Flower server." msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: flwr.client.app.start_client:28 flwr.client.app.start_numpy_client:26 of #, fuzzy msgid "" "Starts an insecure gRPC connection when True. 
Enables HTTPS connection " @@ -10400,7 +10034,7 @@ msgstr "" "为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " "None,则使用系统证书。" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: flwr.client.app.start_client:31 flwr.client.app.start_numpy_client:29 of msgid "" "Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " "bidirectional streaming - 'grpc-rere': gRPC, request-response " @@ -10409,7 +10043,15 @@ msgstr "" "配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " "'rest': HTTP(实验性)" -#: flwr.client.app.start_client:31 of +#: flwr.client.app.start_client:36 of +msgid "" +"Tuple containing the elliptic curve private key and public key for " +"authentication from the cryptography library. Source: " +"https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ Used " +"to establish an authenticated connection with the server." +msgstr "" + +#: flwr.client.app.start_client:41 of #, fuzzy msgid "" "The maximum number of times the client will try to connect to the server " @@ -10417,7 +10059,7 @@ msgid "" "no limit to the number of tries." msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" -#: flwr.client.app.start_client:35 of +#: flwr.client.app.start_client:45 of #, fuzzy msgid "" "The maximum duration before the client stops trying to connect to the " @@ -10425,16 +10067,16 @@ msgid "" "the total time." 
msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "使用不安全的服务器连接启动 gRPC 客户端:" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:59 flwr.client.app.start_numpy_client:44 of #, fuzzy msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:70 flwr.client.app.start_numpy_client:52 of #, fuzzy msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "启动支持 SSL 的 gRPC 客户端:" @@ -10653,8 +10295,8 @@ msgstr "配置日志记录" #: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`Context `\\ \\(node\\_id\\, " -"node\\_config\\, state\\, run\\_config\\)" +":py:obj:`Context `\\ \\(run\\_id\\, node\\_id\\, " +"node\\_config\\, state\\, ...\\)" msgstr ":py:obj:`Context `\\ \\(state\\)" #: ../../source/ref-api/flwr.common.rst:68::1 @@ -11300,20 +10942,25 @@ msgstr "背景" #: flwr.common.context.Context:3 of #, fuzzy -msgid "The ID that identifies the node." +msgid "The ID that identifies the run." msgstr "错误的标识符。" #: flwr.common.context.Context:5 of +#, fuzzy +msgid "The ID that identifies the node." +msgstr "错误的标识符。" + +#: flwr.common.context.Context:7 of msgid "" "A config (key/value mapping) unique to the node and independent of the " "`run_config`. This config persists across all runs this node participates" " in." msgstr "" -#: flwr.common.context.Context:8 of +#: flwr.common.context.Context:10 of #, fuzzy msgid "" -"Holds records added by the entity in a given run and that will stay " +"Holds records added by the entity in a given `run_id` and that will stay " "local. 
This means that the data it holds will never leave the system it's" " running from. This can be used as an intermediate storage or scratchpad " "when executing mods. It can also be used as a memory to access at " @@ -11321,29 +10968,34 @@ msgid "" "multiple rounds)" msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" -#: flwr.common.context.Context:15 of +#: flwr.common.context.Context:17 of msgid "" -"A config (key/value mapping) held by the entity in a given run and that " -"will stay local. It can be used at any point during the lifecycle of this" -" entity (e.g. across multiple rounds)" +"A config (key/value mapping) held by the entity in a given `run_id` and " +"that will stay local. It can be used at any point during the lifecycle of" +" this entity (e.g. across multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`node_id `\\" msgstr ":py:obj:`src_node_id `\\" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`node_config `\\" msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`state `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:32::1 #, fuzzy msgid ":py:obj:`run_config `\\" msgstr ":py:obj:`config `\\" @@ -12024,24 +11676,6 @@ msgstr "" ":py:obj:`START_SIMULATION_LEAVE " "`\\" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" - -#: flwr.common.EventType.capitalize:1::1 of 
-#, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" - #: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" @@ -12755,6 +12389,11 @@ msgstr "该信息的有效时间(秒)。如果未设置,则将根据接收 msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.common.message.Message.create_error_reply:12 of +#, fuzzy +msgid "**message** -- A Message containing only the relevant error and metadata." +msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" + #: flwr.common.message.Message.create_reply:3 of #, fuzzy msgid "" @@ -12811,6 +12450,10 @@ msgstr ":py:obj:`GET_PARAMETERS `\ msgid ":py:obj:`GET_PROPERTIES `\\" msgstr ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "描述数据" + #: flwr.common.Metadata.created_at:1::1 #: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of #, fuzzy @@ -13472,7 +13115,7 @@ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:37::1 #: flwr.server.driver.driver.Driver:1 of #, fuzzy -msgid "Abstract base Driver class for the Driver API." +msgid "Abstract base Driver class for the ServerAppIo API." msgstr "Flower 客户端的抽象基类。" #: ../../source/ref-api/flwr.server.rst:37::1 @@ -13680,6 +13323,10 @@ msgstr "等待至少 `num_clients` 可用。" msgid "**num_available** -- The number of currently available clients." msgstr "**num_available** -- 当前可用客户端的数量。" +#: flwr.server.client_manager.ClientManager.register:3 of +msgid "The ClientProxy of the Client to register." +msgstr "" + #: flwr.server.client_manager.ClientManager.register:6 #: flwr.server.client_manager.SimpleClientManager.register:6 of #, fuzzy @@ -13695,12 +13342,16 @@ msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册 msgid "This method is idempotent." 
msgstr "这种方法是幂等的。" +#: flwr.server.client_manager.ClientManager.unregister:5 of +msgid "The ClientProxy of the Client to unregister." +msgstr "" + #: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy msgid "Driver" msgstr "服务器" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid "" ":py:obj:`create_message `\\ " @@ -13709,24 +13360,24 @@ msgstr "" ":py:obj:`create_message `\\ " "\\(content\\, message\\_type\\, ...\\)" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.create_message:1 of #, fuzzy msgid "Create a new message with specified parameters." msgstr "使用指定参数创建新信息。" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr ":py:obj:`get_node_ids `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.get_node_ids:1 of #, fuzzy msgid "Get node IDs." msgstr "获取节点 ID。" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid "" ":py:obj:`pull_messages `\\ " @@ -13735,13 +13386,13 @@ msgstr "" ":py:obj:`pull_messages `\\ " "\\(message\\_ids\\)" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.pull_messages:1 of #, fuzzy msgid "Pull messages based on message IDs." 
msgstr "根据信息 ID 提取信息。" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid "" ":py:obj:`push_messages `\\ " @@ -13750,13 +13401,13 @@ msgstr "" ":py:obj:`push_messages `\\ " "\\(messages\\)" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.push_messages:1 of #, fuzzy msgid "Push messages to specified node IDs." msgstr "向指定的节点 ID 推送信息。" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #, fuzzy msgid "" ":py:obj:`send_and_receive `\\ " @@ -13766,12 +13417,22 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 #: flwr.server.driver.driver.Driver.send_and_receive:1 of #, fuzzy msgid "Push messages to specified node IDs and pull the reply messages." msgstr "向指定的节点 ID 推送信息并提取回复信息。" +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#, fuzzy +msgid ":py:obj:`set_run `\\ \\(run\\_id\\)" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" + +#: ../../source/ref-api/flwr.server.Driver.rst:41::1 +#: flwr.server.driver.driver.Driver.set_run:1 of +msgid "Request a run to the SuperLink with a given `run_id`." +msgstr "" + #: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy msgid ":py:obj:`run `\\" @@ -13902,6 +13563,17 @@ msgstr "" "该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " "`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" +#: flwr.server.driver.driver.Driver.set_run:3 of +msgid "" +"If a Run with the specified `run_id` exists, a local Run object will be " +"created. It enables further functionality in the driver, such as sending " +"`Messages`." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.set_run:7 of +msgid "The `run_id` of the Run this Driver object operates in." +msgstr "" + #: ../../source/ref-api/flwr.server.History.rst:2 #, fuzzy msgid "History" @@ -14002,42 +13674,47 @@ msgstr "遗留上下文" msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "Bases: :py:class:`~flwr.common.context.Context`" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`config `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`strategy `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`client_manager `\\" msgstr ":py:obj:`client_manager `\\" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`history `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`node_id `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`node_config `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 #, fuzzy msgid ":py:obj:`state `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:36::1 
#, fuzzy msgid ":py:obj:`run_config `\\" msgstr "server.strategy.Strategy" @@ -14139,11 +13816,6 @@ msgstr ":py:obj:`set_strategy `\\ \\(strategy\\ msgid "Replace server strategy." msgstr "server.strategy" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -#, fuzzy -msgid "ServerApp" -msgstr "服务器" - #: flwr.server.server_app.ServerApp:5 of #, fuzzy msgid "Use the `ServerApp` with an existing `Strategy`:" @@ -14177,7 +13849,7 @@ msgid "" "thereof. If no instance is provided, one will be created internally." msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.app.start_server:9 +#: flwr.server.app.start_server:14 #: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " @@ -14335,17 +14007,23 @@ msgstr "**success**" msgid "start\\_server" msgstr "server.start_server" -#: flwr.server.app.start_server:3 of +#: flwr.server.app.start_server:5 of +msgid "" +"This function is deprecated since 1.13.0. Use the :code:`flower-" +"superlink` command instead to start a SuperLink." +msgstr "" + +#: flwr.server.app.start_server:8 of msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: flwr.server.app.start_server:5 of +#: flwr.server.app.start_server:10 of msgid "" "A server implementation, either `flwr.server.Server` or a subclass " "thereof. If no instance is provided, then `start_server` will create one." msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.app.start_server:12 of +#: flwr.server.app.start_server:17 of msgid "" "An implementation of the abstract base class " "`flwr.server.strategy.Strategy`. 
If no strategy is provided, then " @@ -14354,7 +14032,7 @@ msgstr "" "抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " "`flwr.server.strategy.FedAvg`。" -#: flwr.server.app.start_server:16 of +#: flwr.server.app.start_server:21 of msgid "" "An implementation of the abstract base class `flwr.server.ClientManager`." " If no implementation is provided, then `start_server` will use " @@ -14363,7 +14041,7 @@ msgstr "" "抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " "`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.app.start_server:21 of +#: flwr.server.app.start_server:26 of msgid "" "The maximum length of gRPC messages that can be exchanged with the Flower" " clients. The default should be sufficient for most models. Users who " @@ -14375,7 +14053,7 @@ msgstr "" "可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " "客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " @@ -14385,34 +14063,34 @@ msgstr "" "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " "服务器私钥。" -#: flwr.server.app.start_server:28 of +#: flwr.server.app.start_server:33 of msgid "" "Tuple containing root certificate, server certificate, and private key to" " start a secure SSL-enabled server. The tuple is expected to have three " "bytes elements in the following order:" msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" -#: flwr.server.app.start_server:32 of +#: flwr.server.app.start_server:37 of msgid "CA certificate." msgstr "CA 证书。" -#: flwr.server.app.start_server:33 of +#: flwr.server.app.start_server:38 of msgid "server certificate." 
msgstr "服务器证书。" -#: flwr.server.app.start_server:34 of +#: flwr.server.app.start_server:39 of msgid "server private key." msgstr "服务器私人密钥。" -#: flwr.server.app.start_server:37 of +#: flwr.server.app.start_server:42 of msgid "**hist** -- Object containing training and evaluation metrics." msgstr "**hist** -- 包含训练和评估指标的对象。" -#: flwr.server.app.start_server:42 of +#: flwr.server.app.start_server:47 of msgid "Starting an insecure server:" msgstr "启动不安全的服务器:" -#: flwr.server.app.start_server:46 of +#: flwr.server.app.start_server:51 of msgid "Starting an SSL-enabled server:" msgstr "启动支持 SSL 的服务器:" @@ -16030,7 +15708,7 @@ msgstr "" "\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:1231 +#: ../../source/ref-changelog.md:1434 msgid "FedAdagrad" msgstr "FedAdagrad" @@ -18304,7 +17982,7 @@ msgstr "" msgid "simulation" msgstr "运行模拟" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #, fuzzy msgid "" ":py:obj:`run_simulation `\\ " @@ -18313,13 +17991,29 @@ msgstr "" ":py:obj:`run_simulation `\\ " "\\(server\\_app\\, client\\_app\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.run_simulation.run_simulation:1 of #, fuzzy msgid "Run a Flower App using the Simulation Engine." msgstr "使用模拟引擎运行花朵应用程序。" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#, fuzzy +msgid "" +":py:obj:`run_simulation_process " +"`\\ \\(...\\[\\, flwr\\_dir\\_\\," +" ...\\]\\)" +msgstr "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 +#: flwr.simulation.app.run_simulation_process:1 of +#, fuzzy +msgid "Run Flower Simulation process." 
+msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.rst:24::1 #, fuzzy msgid "" ":py:obj:`start_simulation `\\ " @@ -18328,11 +18022,42 @@ msgstr "" ":py:obj:`start_simulation `\\ \\(\\*\\," " client\\_fn\\[\\, ...\\]\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.simulation.rst:24::1 #: flwr.simulation.start_simulation:1 of msgid "Log error stating that module `ray` could not be imported." msgstr "" +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#, fuzzy +msgid "" +":py:obj:`SimulationIoConnection " +"`\\ \\(\\[...\\]\\)" +msgstr "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.simulation.rst:31::1 +#: flwr.simulation.simulationio_connection.SimulationIoConnection:1 of +msgid "`SimulationIoConnection` provides an interface to the SimulationIo API." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.SimulationIoConnection.rst:2 +#, fuzzy +msgid "SimulationIoConnection" +msgstr "运行模拟" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:3 of +msgid "The address (URL, IPv6, IPv4) of the SuperLink SimulationIo API service." +msgstr "" + +#: flwr.simulation.simulationio_connection.SimulationIoConnection:5 of +#, fuzzy +msgid "" +"The PEM-encoded root certificates as a byte string. If provided, a secure" +" connection using the certificates will be established to an SSL-enabled " +"Flower server." +msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" + #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 #, fuzzy msgid "run\\_simulation" @@ -18398,6 +18123,11 @@ msgid "" "If enabled, DEBUG-level logs will be displayed." 
msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用后,将显示 DEBUG 级日志。" +#: ../../source/ref-api/flwr.simulation.run_simulation_process.rst:2 +#, fuzzy +msgid "run\\_simulation\\_process" +msgstr "运行模拟" + #: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 #, fuzzy msgid "start\\_simulation" @@ -18409,25 +18139,27 @@ msgstr "更新日志" #: ../../source/ref-changelog.md:3 #, fuzzy -msgid "v1.11.1 (2024-09-11)" -msgstr "v1.3.0 (2023-02-06)" +msgid "v1.13.1 (2024-11-26)" +msgstr "v1.4.0 (2023-04-21)" #: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 -#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 -#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 -#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 -#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 -#: ../../source/ref-changelog.md:940 +#: ../../source/ref-changelog.md:138 ../../source/ref-changelog.md:208 +#: ../../source/ref-changelog.md:240 ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:442 ../../source/ref-changelog.md:542 +#: ../../source/ref-changelog.md:606 ../../source/ref-changelog.md:699 +#: ../../source/ref-changelog.md:799 ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:1074 ../../source/ref-changelog.md:1143 msgid "Thanks to our contributors" msgstr "感谢我们的贡献者" #: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 -#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 -#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 -#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:140 ../../source/ref-changelog.md:210 +#: 
../../source/ref-changelog.md:242 ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:444 ../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:608 ../../source/ref-changelog.md:701 +#: ../../source/ref-changelog.md:801 ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:949 ../../source/ref-changelog.md:1007 msgid "" "We would like to give our special thanks to all the contributors who made" " the new version of Flower possible (in `git shortlog` order):" @@ -18436,8 +18168,8 @@ msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献 #: ../../source/ref-changelog.md:9 #, fuzzy msgid "" -"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " -"`Javier`, `Robert Steiner`, `Yan Gao` " +"`Adam Narozniak`, `Charles Beauville`, `Heng Pan`, `Javier`, `Robert " +"Steiner` " msgstr "" "`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " "`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " @@ -18445,107 +18177,151 @@ msgstr "" "Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:11 -#, fuzzy -msgid "Improvements" -msgstr "可选的改进措施" +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:144 ../../source/ref-changelog.md:246 +#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:448 +#: ../../source/ref-changelog.md:548 ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:705 ../../source/ref-changelog.md:805 +#: ../../source/ref-changelog.md:889 ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:1011 ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1209 ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1318 ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1429 ../../source/ref-changelog.md:1468 +#: ../../source/ref-changelog.md:1501 ../../source/ref-changelog.md:1551 +msgid "What's new?" 
+msgstr "有什么新内容?" #: ../../source/ref-changelog.md:13 #, fuzzy msgid "" -"**Implement** `keys/values/items` **methods for** `TypedDict` " -"([#4146](https://github.com/adap/flower/pull/4146))" +"**Fix `SimulationEngine` Executor for SuperLink** " +"([#4563](https://github.com/adap/flower/pull/4563), " +"[#4568](https://github.com/adap/flower/pull/4568), " +"[#4570](https://github.com/adap/flower/pull/4570))" msgstr "" -"**使** `get_parameters` **可配置** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" #: ../../source/ref-changelog.md:15 -#, fuzzy msgid "" -"**Fix parsing of** `--executor-config` **if present** " -"([#4125](https://github.com/adap/flower/pull/4125))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"Resolved an issue that prevented SuperLink from functioning correctly " +"when using the `SimulationEngine` executor." +msgstr "" #: ../../source/ref-changelog.md:17 #, fuzzy msgid "" -"**Adjust framework name in templates docstrings** " -"([#4127](https://github.com/adap/flower/pull/4127))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"**Improve FAB build and install** " +"([#4571](https://github.com/adap/flower/pull/4571))" +msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" #: ../../source/ref-changelog.md:19 -#, fuzzy msgid "" -"**Update** `flwr new` **Hugging Face template** " -"([#4169](https://github.com/adap/flower/pull/4169))" +"An updated FAB build and install process produces smaller FAB files and " +"doesn't rely on `pip install` any more. It also resolves an issue where " +"all files were unnecessarily included in the FAB file. 
The `flwr` CLI " +"commands now correctly pack only the necessary files, such as `.md`, " +"`.toml` and `.py`, ensuring more efficient and accurate packaging." msgstr "" -"**新的Hugging Face Transformers代码示例** " -"([#863](https://github.com/adap/flower/pull/863))" #: ../../source/ref-changelog.md:21 #, fuzzy msgid "" -"**Fix** `flwr new` **FlowerTune template** " -"([#4123](https://github.com/adap/flower/pull/4123))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +"**Update** `embedded-devices` **example** " +"([#4381](https://github.com/adap/flower/pull/4381))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" #: ../../source/ref-changelog.md:23 -#, fuzzy -msgid "" -"**Add buffer time after** `ServerApp` **thread initialization** " -"([#4119](https://github.com/adap/flower/pull/4119))" +msgid "The example now uses the `flwr run` command and the Deployment Engine." msgstr "" -"**在模拟过程中为***`历史`***对象添加训练指标*** " -"([#1696](https://github.com/adap/flower/pull/1696))" #: ../../source/ref-changelog.md:25 #, fuzzy msgid "" -"**Handle unsuitable resources for simulation** " -"([#4143](https://github.com/adap/flower/pull/4143))" -msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" +"**Update Documentation** " +"([#4566](https://github.com/adap/flower/pull/4566), " +"[#4569](https://github.com/adap/flower/pull/4569), " +"[#4560](https://github.com/adap/flower/pull/4560), " +"[#4556](https://github.com/adap/flower/pull/4556), " +"[#4581](https://github.com/adap/flower/pull/4581), " +"[#4537](https://github.com/adap/flower/pull/4537), " +"[#4562](https://github.com/adap/flower/pull/4562), " +"[#4582](https://github.com/adap/flower/pull/4582))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " 
+"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" #: ../../source/ref-changelog.md:27 -#, fuzzy msgid "" -"**Update example READMEs** " -"([#4117](https://github.com/adap/flower/pull/4117))" +"Enhanced documentation across various aspects, including updates to " +"translation workflows, Docker-related READMEs, and recommended datasets. " +"Improvements also include formatting fixes for dataset partitioning docs " +"and better references to resources in the datasets documentation index." msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" #: ../../source/ref-changelog.md:29 #, fuzzy msgid "" -"**Update SuperNode authentication docs** " -"([#4160](https://github.com/adap/flower/pull/4160))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +"**Update Infrastructure and CI/CD** " +"([#4577](https://github.com/adap/flower/pull/4577), " +"[#4578](https://github.com/adap/flower/pull/4578), " +"[#4558](https://github.com/adap/flower/pull/4558), " +"[#4551](https://github.com/adap/flower/pull/4551), " +"[#3356](https://github.com/adap/flower/pull/3356), " +"[#4559](https://github.com/adap/flower/pull/4559), " +"[#4575](https://github.com/adap/flower/pull/4575))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 -#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 -#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 -#: ../../source/ref-changelog.md:738 
../../source/ref-changelog.md:796 -#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 -#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 -#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 -#: ../../source/ref-changelog.md:1253 -msgid "Incompatible changes" -msgstr "不兼容的更改" +#: ../../source/ref-changelog.md:31 +#, fuzzy +msgid "" +"**General improvements** " +"([#4557](https://github.com/adap/flower/pull/4557), " +"[#4564](https://github.com/adap/flower/pull/4564), " +"[#4573](https://github.com/adap/flower/pull/4573), " +"[#4561](https://github.com/adap/flower/pull/4561), " +"[#4579](https://github.com/adap/flower/pull/4579), " +"[#4572](https://github.com/adap/flower/pull/4572))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" + +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:198 ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:408 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" #: ../../source/ref-changelog.md:35 #, fuzzy -msgid "v1.11.0 (2024-08-30)" -msgstr "v1.3.0 (2023-02-06)" +msgid "v1.13.0 (2024-11-20)" +msgstr "v1.4.0 (2023-04-21)" #: ../../source/ref-changelog.md:41 #, fuzzy msgid "" "`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," -" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " -"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +"Beutel`, `Daniel Nata Nugraha`, `Dimitris Stripelis`, `Heng Pan`, " +"`Javier`, `Mohammad Naseri`, `Robert Steiner`, `Waris Gill`, `William " +"Lindskog`, `Yan Gao`, `Yao Xu`, `wwjang` " msgstr "" "`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " "`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " @@ -18553,143 +18329,115 @@ msgstr "" "Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 -#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 -#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 -#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 -#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 -#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 -#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 -#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 -#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 -#: ../../source/ref-changelog.md:1348 -msgid "What's new?" -msgstr "有什么新内容?" 
- #: ../../source/ref-changelog.md:45 +#, fuzzy msgid "" -"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " -"([#4006](https://github.com/adap/flower/pull/4006), " -"[#3945](https://github.com/adap/flower/pull/3945), " -"[#3999](https://github.com/adap/flower/pull/3999), " -"[#4027](https://github.com/adap/flower/pull/4027), " -"[#3851](https://github.com/adap/flower/pull/3851), " -"[#3946](https://github.com/adap/flower/pull/3946), " -"[#4003](https://github.com/adap/flower/pull/4003), " -"[#4029](https://github.com/adap/flower/pull/4029), " -"[#3942](https://github.com/adap/flower/pull/3942), " -"[#3957](https://github.com/adap/flower/pull/3957), " -"[#4020](https://github.com/adap/flower/pull/4020), " -"[#4044](https://github.com/adap/flower/pull/4044), " -"[#3852](https://github.com/adap/flower/pull/3852), " -"[#4019](https://github.com/adap/flower/pull/4019), " -"[#4031](https://github.com/adap/flower/pull/4031), " -"[#4036](https://github.com/adap/flower/pull/4036), " -"[#4049](https://github.com/adap/flower/pull/4049), " -"[#4017](https://github.com/adap/flower/pull/4017), " -"[#3943](https://github.com/adap/flower/pull/3943), " -"[#3944](https://github.com/adap/flower/pull/3944), " -"[#4011](https://github.com/adap/flower/pull/4011), " -"[#3619](https://github.com/adap/flower/pull/3619))" +"**Introduce `flwr ls` command** " +"([#4460](https://github.com/adap/flower/pull/4460), " +"[#4459](https://github.com/adap/flower/pull/4459), " +"[#4477](https://github.com/adap/flower/pull/4477))" msgstr "" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" #: ../../source/ref-changelog.md:47 msgid "" -"Dynamic code updates are here! `flwr run` can now ship and install the " -"latest version of your `ServerApp` and `ClientApp` to an already-running " -"federation (SuperLink and SuperNodes)." 
+"The `flwr ls` command is now available to display details about all runs " +"(or one specific run). It supports the following usage options:" msgstr "" #: ../../source/ref-changelog.md:49 -msgid "" -"How does it work? `flwr run` bundles your Flower app into a single FAB " -"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," -" to both the SuperLink and those SuperNodes that need it. This allows you" -" to keep SuperExec, SuperLink and SuperNodes running as permanent " -"infrastructure, and then ship code updates (including completely new " -"projects!) dynamically." -msgstr "" - -#: ../../source/ref-changelog.md:51 -msgid "`flwr run` is all you need." +msgid "`flwr ls --runs [] []`: Lists all runs." msgstr "" -#: ../../source/ref-changelog.md:53 -#, fuzzy +#: ../../source/ref-changelog.md:50 msgid "" -"**Introduce isolated** `ClientApp` **execution** " -"([#3970](https://github.com/adap/flower/pull/3970), " -"[#3976](https://github.com/adap/flower/pull/3976), " -"[#4002](https://github.com/adap/flower/pull/4002), " -"[#4001](https://github.com/adap/flower/pull/4001), " -"[#4034](https://github.com/adap/flower/pull/4034), " -"[#4037](https://github.com/adap/flower/pull/4037), " -"[#3977](https://github.com/adap/flower/pull/3977), " -"[#4042](https://github.com/adap/flower/pull/4042), " -"[#3978](https://github.com/adap/flower/pull/3978), " -"[#4039](https://github.com/adap/flower/pull/4039), " -"[#4033](https://github.com/adap/flower/pull/4033), " -"[#3971](https://github.com/adap/flower/pull/3971), " -"[#4035](https://github.com/adap/flower/pull/4035), " -"[#3973](https://github.com/adap/flower/pull/3973), " -"[#4032](https://github.com/adap/flower/pull/4032))" +"`flwr ls --run-id [] []`: Displays details for " +"a specific run." 
msgstr "" -"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github. com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-changelog.md:52 msgid "" -"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" -" enterprise deployment, this allows you to set strict limits on what the " -"`ClientApp` can and cannot do." +"This command provides information including the run ID, FAB ID and " +"version, run status, elapsed time, and timestamps for when the run was " +"created, started running, and finished." 
msgstr "" -#: ../../source/ref-changelog.md:57 -msgid "`flower-supernode` supports three `--isolation` modes:" +#: ../../source/ref-changelog.md:54 +#, fuzzy +msgid "" +"**Fuse SuperLink and SuperExec** " +"([#4358](https://github.com/adap/flower/pull/4358), " +"[#4403](https://github.com/adap/flower/pull/4403), " +"[#4406](https://github.com/adap/flower/pull/4406), " +"[#4357](https://github.com/adap/flower/pull/4357), " +"[#4359](https://github.com/adap/flower/pull/4359), " +"[#4354](https://github.com/adap/flower/pull/4354), " +"[#4229](https://github.com/adap/flower/pull/4229), " +"[#4283](https://github.com/adap/flower/pull/4283), " +"[#4352](https://github.com/adap/flower/pull/4352))" msgstr "" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:56 msgid "" -"Unset: The SuperNode runs the `ClientApp` in the same process (as in " -"previous versions of Flower). This is the default mode." +"SuperExec has been integrated into SuperLink, enabling SuperLink to " +"directly manage ServerApp processes (`flwr-serverapp`). The `flwr` CLI " +"now targets SuperLink's Exec API. Additionally, SuperLink introduces two " +"isolation modes for running ServerApps: `subprocess` (default) and " +"`process`, which can be specified using the `--isolation " +"{subprocess,process}` flag." msgstr "" -#: ../../source/ref-changelog.md:60 +#: ../../source/ref-changelog.md:58 +#, fuzzy msgid "" -"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " -"`ClientApp`." 
+"**Introduce `flwr-serverapp` command** " +"([#4394](https://github.com/adap/flower/pull/4394), " +"[#4370](https://github.com/adap/flower/pull/4370), " +"[#4367](https://github.com/adap/flower/pull/4367), " +"[#4350](https://github.com/adap/flower/pull/4350), " +"[#4364](https://github.com/adap/flower/pull/4364), " +"[#4400](https://github.com/adap/flower/pull/4400), " +"[#4363](https://github.com/adap/flower/pull/4363), " +"[#4401](https://github.com/adap/flower/pull/4401), " +"[#4388](https://github.com/adap/flower/pull/4388), " +"[#4402](https://github.com/adap/flower/pull/4402))" msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:60 msgid "" -"`--isolation=process`: The SuperNode expects an externally-managed " -"process to run the `ClientApp`. This external process is not managed by " -"the SuperNode, so it has to be started beforehand and terminated " -"manually. The common way to use this isolation mode is via the new " -"`flwr/clientapp` Docker image." +"The `flwr-serverapp` command has been introduced as a CLI entry point " +"that runs a `ServerApp` process. This process communicates with SuperLink" +" to load and execute the `ServerApp` object, enabling isolated execution " +"and more flexible deployment." 
msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:62 #, fuzzy msgid "" -"**Improve Docker support for enterprise deployments** " -"([#4050](https://github.com/adap/flower/pull/4050), " -"[#4090](https://github.com/adap/flower/pull/4090), " -"[#3784](https://github.com/adap/flower/pull/3784), " -"[#3998](https://github.com/adap/flower/pull/3998), " -"[#4094](https://github.com/adap/flower/pull/4094), " -"[#3722](https://github.com/adap/flower/pull/3722))" +"**Improve simulation engine and introduce `flwr-simulation` command** " +"([#4433](https://github.com/adap/flower/pull/4433), " +"[#4486](https://github.com/adap/flower/pull/4486), " +"[#4448](https://github.com/adap/flower/pull/4448), " +"[#4427](https://github.com/adap/flower/pull/4427), " +"[#4438](https://github.com/adap/flower/pull/4438), " +"[#4421](https://github.com/adap/flower/pull/4421), " +"[#4430](https://github.com/adap/flower/pull/4430), " +"[#4462](https://github.com/adap/flower/pull/4462))" msgstr "" "**移除对 Python 3.7 的支持** " "([#2280](https://github.com/adap/flower/pull/2280), " @@ -18699,69 +18447,154 @@ msgstr "" "[#2355](https://github.com/adap/flower/pull/2355), " "[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:64 msgid "" -"Flower 1.11 ships many Docker improvements that are especially useful for" -" enterprise deployments:" +"The simulation engine has been significantly improved, resulting in " +"dramatically faster simulations. Additionally, the `flwr-simulation` " +"command has been introduced to enhance maintainability and provide a " +"dedicated entry point for running simulations." msgstr "" -#: ../../source/ref-changelog.md:67 -msgid "`flwr/supernode` comes with a new Alpine Docker image." 
+#: ../../source/ref-changelog.md:66 +#, fuzzy +msgid "" +"**Improve SuperLink message management** " +"([#4378](https://github.com/adap/flower/pull/4378), " +"[#4369](https://github.com/adap/flower/pull/4369))" msgstr "" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" #: ../../source/ref-changelog.md:68 msgid "" -"`flwr/clientapp` is a new image to be used with the `--isolation=process`" -" option. In this mode, SuperNode and `ClientApp` run in two different " -"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " -"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " -"runs the `ClientApp`. This is the recommended way to deploy Flower in " -"enterprise settings." +"SuperLink now validates the destination node ID of instruction messages " +"and checks the TTL (time-to-live) for reply messages. When pulling reply " +"messages, an error reply will be generated and returned if the " +"corresponding instruction message does not exist, has expired, or if the " +"reply message exists but has expired." msgstr "" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:70 +#, fuzzy +msgid "" +"**Introduce FedDebug baseline** " +"([#3783](https://github.com/adap/flower/pull/3783))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" + +#: ../../source/ref-changelog.md:72 msgid "" -"New all-in-one Docker Compose enables you to easily start a full Flower " -"Deployment Engine on a single machine." +"FedDebug is a framework that enhances debugging in Federated Learning by " +"enabling interactive inspection of the training process and automatically" +" identifying clients responsible for degrading the global model's " +"performance—all without requiring testing data or labels. Learn more in " +"the [FedDebug baseline " +"documentation](https://flower.ai/docs/baselines/feddebug.html)." 
msgstr "" -#: ../../source/ref-changelog.md:70 +#: ../../source/ref-changelog.md:74 msgid "" -"Completely new Docker documentation: " -"https://flower.ai/docs/framework/docker/index.html" +"**Update documentation** " +"([#4511](https://github.com/adap/flower/pull/4511), " +"[#4010](https://github.com/adap/flower/pull/4010), " +"[#4396](https://github.com/adap/flower/pull/4396), " +"[#4499](https://github.com/adap/flower/pull/4499), " +"[#4269](https://github.com/adap/flower/pull/4269), " +"[#3340](https://github.com/adap/flower/pull/3340), " +"[#4482](https://github.com/adap/flower/pull/4482), " +"[#4387](https://github.com/adap/flower/pull/4387), " +"[#4342](https://github.com/adap/flower/pull/4342), " +"[#4492](https://github.com/adap/flower/pull/4492), " +"[#4474](https://github.com/adap/flower/pull/4474), " +"[#4500](https://github.com/adap/flower/pull/4500), " +"[#4514](https://github.com/adap/flower/pull/4514), " +"[#4236](https://github.com/adap/flower/pull/4236), " +"[#4112](https://github.com/adap/flower/pull/4112), " +"[#3367](https://github.com/adap/flower/pull/3367), " +"[#4501](https://github.com/adap/flower/pull/4501), " +"[#4373](https://github.com/adap/flower/pull/4373), " +"[#4409](https://github.com/adap/flower/pull/4409), " +"[#4356](https://github.com/adap/flower/pull/4356), " +"[#4520](https://github.com/adap/flower/pull/4520), " +"[#4524](https://github.com/adap/flower/pull/4524), " +"[#4525](https://github.com/adap/flower/pull/4525), " +"[#4526](https://github.com/adap/flower/pull/4526), " +"[#4527](https://github.com/adap/flower/pull/4527), " +"[#4528](https://github.com/adap/flower/pull/4528), " +"[#4545](https://github.com/adap/flower/pull/4545), " +"[#4522](https://github.com/adap/flower/pull/4522), " +"[#4534](https://github.com/adap/flower/pull/4534), " +"[#4513](https://github.com/adap/flower/pull/4513), " +"[#4529](https://github.com/adap/flower/pull/4529), " +"[#4441](https://github.com/adap/flower/pull/4441), " 
+"[#4530](https://github.com/adap/flower/pull/4530), " +"[#4470](https://github.com/adap/flower/pull/4470), " +"[#4553](https://github.com/adap/flower/pull/4553), " +"[#4531](https://github.com/adap/flower/pull/4531), " +"[#4554](https://github.com/adap/flower/pull/4554), " +"[#4555](https://github.com/adap/flower/pull/4555), " +"[#4552](https://github.com/adap/flower/pull/4552), " +"[#4533](https://github.com/adap/flower/pull/4533))" msgstr "" -#: ../../source/ref-changelog.md:72 +#: ../../source/ref-changelog.md:76 +msgid "" +"Many documentation pages and tutorials have been updated to improve " +"clarity, fix typos, incorporate user feedback, and stay aligned with the " +"latest features in the framework. Key updates include adding a guide for " +"designing stateful `ClientApp` objects, updating the comprehensive guide " +"for setting up and running Flower's `Simulation Engine`, updating the " +"XGBoost, scikit-learn, and JAX quickstart tutorials to use `flwr run`, " +"updating DP guide, removing outdated pages, updating Docker docs, and " +"marking legacy functions as deprecated. The [Secure Aggregation " +"Protocols](https://flower.ai/docs/framework/contributor-ref-secure-" +"aggregation-protocols.html) page has also been updated." 
+msgstr "" + +#: ../../source/ref-changelog.md:78 #, fuzzy msgid "" -"**Improve SuperNode authentication** " -"([#4043](https://github.com/adap/flower/pull/4043), " -"[#4047](https://github.com/adap/flower/pull/4047), " -"[#4074](https://github.com/adap/flower/pull/4074))" +"**Update examples and templates** " +"([#4510](https://github.com/adap/flower/pull/4510), " +"[#4368](https://github.com/adap/flower/pull/4368), " +"[#4121](https://github.com/adap/flower/pull/4121), " +"[#4329](https://github.com/adap/flower/pull/4329), " +"[#4382](https://github.com/adap/flower/pull/4382), " +"[#4248](https://github.com/adap/flower/pull/4248), " +"[#4395](https://github.com/adap/flower/pull/4395), " +"[#4386](https://github.com/adap/flower/pull/4386), " +"[#4408](https://github.com/adap/flower/pull/4408))" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:74 +#: ../../source/ref-changelog.md:80 msgid "" -"SuperNode auth has been improved in several ways, including improved " -"logging, improved testing, and improved error handling." +"Multiple examples and templates have been updated to enhance usability " +"and correctness. The updates include the `30-minute-tutorial`, " +"`quickstart-jax`, `quickstart-pytorch`, `advanced-tensorflow` examples, " +"and the FlowerTune template." 
msgstr "" -#: ../../source/ref-changelog.md:76 +#: ../../source/ref-changelog.md:82 #, fuzzy msgid "" -"**Update** `flwr new` **templates** " -"([#3933](https://github.com/adap/flower/pull/3933), " -"[#3894](https://github.com/adap/flower/pull/3894), " -"[#3930](https://github.com/adap/flower/pull/3930), " -"[#3931](https://github.com/adap/flower/pull/3931), " -"[#3997](https://github.com/adap/flower/pull/3997), " -"[#3979](https://github.com/adap/flower/pull/3979), " -"[#3965](https://github.com/adap/flower/pull/3965), " -"[#4013](https://github.com/adap/flower/pull/4013), " -"[#4064](https://github.com/adap/flower/pull/4064))" +"**Improve Docker support** " +"([#4506](https://github.com/adap/flower/pull/4506), " +"[#4424](https://github.com/adap/flower/pull/4424), " +"[#4224](https://github.com/adap/flower/pull/4224), " +"[#4413](https://github.com/adap/flower/pull/4413), " +"[#4414](https://github.com/adap/flower/pull/4414), " +"[#4336](https://github.com/adap/flower/pull/4336), " +"[#4420](https://github.com/adap/flower/pull/4420), " +"[#4407](https://github.com/adap/flower/pull/4407), " +"[#4422](https://github.com/adap/flower/pull/4422), " +"[#4532](https://github.com/adap/flower/pull/4532), " +"[#4540](https://github.com/adap/flower/pull/4540))" msgstr "" "**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " "[#1866](https://github.com/adap/flower/pull/1866), " @@ -18770,274 +18603,291 @@ msgstr "" "[#1477](https://github.com/adap/flower/pull/1477), " "[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:78 +#: ../../source/ref-changelog.md:84 msgid "" -"All `flwr new` templates have been updated to show the latest recommended" -" use of Flower APIs." 
+"Docker images and configurations have been updated, including updating " +"Docker Compose files to version 1.13.0, refactoring the Docker build " +"matrix for better maintainability, updating `docker/build-push-action` to" +" 6.9.0, and improving Docker documentation." msgstr "" -#: ../../source/ref-changelog.md:80 +#: ../../source/ref-changelog.md:86 #, fuzzy msgid "" -"**Improve Simulation Engine** " -"([#4095](https://github.com/adap/flower/pull/4095), " -"[#3913](https://github.com/adap/flower/pull/3913), " -"[#4059](https://github.com/adap/flower/pull/4059), " -"[#3954](https://github.com/adap/flower/pull/3954), " -"[#4071](https://github.com/adap/flower/pull/4071), " -"[#3985](https://github.com/adap/flower/pull/3985), " -"[#3988](https://github.com/adap/flower/pull/3988))" +"**Allow app installation without internet access** " +"([#4479](https://github.com/adap/flower/pull/4479), " +"[#4475](https://github.com/adap/flower/pull/4475))" msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:82 +#: ../../source/ref-changelog.md:88 msgid "" -"The Flower Simulation Engine comes with several updates, including " -"improved run config support, verbose logging, simulation backend " -"configuration via `flwr run`, and more." +"The `flwr build` command now includes a wheel file in the FAB, enabling " +"Flower app installation in environments without internet access via `flwr" +" install`." 
msgstr "" -#: ../../source/ref-changelog.md:84 +#: ../../source/ref-changelog.md:90 #, fuzzy msgid "" -"**Improve** `RecordSet` " -"([#4052](https://github.com/adap/flower/pull/4052), " -"[#3218](https://github.com/adap/flower/pull/3218), " -"[#4016](https://github.com/adap/flower/pull/4016))" +"**Improve `flwr log` command** " +"([#4391](https://github.com/adap/flower/pull/4391), " +"[#4411](https://github.com/adap/flower/pull/4411), " +"[#4390](https://github.com/adap/flower/pull/4390), " +"[#4397](https://github.com/adap/flower/pull/4397))" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:86 +#: ../../source/ref-changelog.md:92 +#, fuzzy msgid "" -"`RecordSet` is the core object to exchange model parameters, " -"configuration values and metrics between `ClientApp` and `ServerApp`. " -"This release ships several smaller improvements to `RecordSet` and " -"related `*Record` types." 
+"**Refactor SuperNode for better maintainability and efficiency** " +"([#4439](https://github.com/adap/flower/pull/4439), " +"[#4348](https://github.com/adap/flower/pull/4348), " +"[#4512](https://github.com/adap/flower/pull/4512), " +"[#4485](https://github.com/adap/flower/pull/4485))" msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:88 +#: ../../source/ref-changelog.md:94 #, fuzzy msgid "" -"**Update documentation** " -"([#3972](https://github.com/adap/flower/pull/3972), " -"[#3925](https://github.com/adap/flower/pull/3925), " -"[#4061](https://github.com/adap/flower/pull/4061), " -"[#3984](https://github.com/adap/flower/pull/3984), " -"[#3917](https://github.com/adap/flower/pull/3917), " -"[#3900](https://github.com/adap/flower/pull/3900), " -"[#4066](https://github.com/adap/flower/pull/4066), " -"[#3765](https://github.com/adap/flower/pull/3765), " -"[#4021](https://github.com/adap/flower/pull/4021), " -"[#3906](https://github.com/adap/flower/pull/3906), " -"[#4063](https://github.com/adap/flower/pull/4063), " -"[#4076](https://github.com/adap/flower/pull/4076), " -"[#3920](https://github.com/adap/flower/pull/3920), " -"[#3916](https://github.com/adap/flower/pull/3916))" -msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " 
-"[#2183](https://github.com/adap/flower/pull/2183))" +"**Support NumPy `2.0`** " +"([#4440](https://github.com/adap/flower/pull/4440))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:90 +#: ../../source/ref-changelog.md:96 +#, fuzzy msgid "" -"Many parts of the documentation, including the main tutorial, have been " -"migrated to show new Flower APIs and other new Flower features like the " -"improved Docker support." +"**Update infrastructure and CI/CD** " +"([#4466](https://github.com/adap/flower/pull/4466), " +"[#4419](https://github.com/adap/flower/pull/4419), " +"[#4338](https://github.com/adap/flower/pull/4338), " +"[#4334](https://github.com/adap/flower/pull/4334), " +"[#4456](https://github.com/adap/flower/pull/4456), " +"[#4446](https://github.com/adap/flower/pull/4446), " +"[#4415](https://github.com/adap/flower/pull/4415))" msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:92 +#: ../../source/ref-changelog.md:98 +#, fuzzy msgid "" -"**Migrate code example to use new Flower APIs** " -"([#3758](https://github.com/adap/flower/pull/3758), " -"[#3701](https://github.com/adap/flower/pull/3701), " -"[#3919](https://github.com/adap/flower/pull/3919), " -"[#3918](https://github.com/adap/flower/pull/3918), " -"[#3934](https://github.com/adap/flower/pull/3934), " -"[#3893](https://github.com/adap/flower/pull/3893), " -"[#3833](https://github.com/adap/flower/pull/3833), " -"[#3922](https://github.com/adap/flower/pull/3922), " -"[#3846](https://github.com/adap/flower/pull/3846), " -"[#3777](https://github.com/adap/flower/pull/3777), " 
-"[#3874](https://github.com/adap/flower/pull/3874), " -"[#3873](https://github.com/adap/flower/pull/3873), " -"[#3935](https://github.com/adap/flower/pull/3935), " -"[#3754](https://github.com/adap/flower/pull/3754), " -"[#3980](https://github.com/adap/flower/pull/3980), " -"[#4089](https://github.com/adap/flower/pull/4089), " -"[#4046](https://github.com/adap/flower/pull/4046), " -"[#3314](https://github.com/adap/flower/pull/3314), " -"[#3316](https://github.com/adap/flower/pull/3316), " -"[#3295](https://github.com/adap/flower/pull/3295), " -"[#3313](https://github.com/adap/flower/pull/3313))" -msgstr "" - -#: ../../source/ref-changelog.md:94 -msgid "Many code examples have been migrated to use new Flower APIs." +"**Bugfixes** ([#4404](https://github.com/adap/flower/pull/4404), " +"[#4518](https://github.com/adap/flower/pull/4518), " +"[#4452](https://github.com/adap/flower/pull/4452), " +"[#4376](https://github.com/adap/flower/pull/4376), " +"[#4493](https://github.com/adap/flower/pull/4493), " +"[#4436](https://github.com/adap/flower/pull/4436), " +"[#4410](https://github.com/adap/flower/pull/4410), " +"[#4442](https://github.com/adap/flower/pull/4442), " +"[#4375](https://github.com/adap/flower/pull/4375), " +"[#4515](https://github.com/adap/flower/pull/4515))" msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:96 +#: ../../source/ref-changelog.md:100 msgid "" -"**Update Flower framework, framework internals and quality " -"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " -"[#4053](https://github.com/adap/flower/pull/4053), " 
-"[#4098](https://github.com/adap/flower/pull/4098), " -"[#4067](https://github.com/adap/flower/pull/4067), " -"[#4105](https://github.com/adap/flower/pull/4105), " -"[#4048](https://github.com/adap/flower/pull/4048), " -"[#4107](https://github.com/adap/flower/pull/4107), " -"[#4069](https://github.com/adap/flower/pull/4069), " -"[#3915](https://github.com/adap/flower/pull/3915), " -"[#4101](https://github.com/adap/flower/pull/4101), " -"[#4108](https://github.com/adap/flower/pull/4108), " -"[#3914](https://github.com/adap/flower/pull/3914), " -"[#4068](https://github.com/adap/flower/pull/4068), " -"[#4041](https://github.com/adap/flower/pull/4041), " -"[#4040](https://github.com/adap/flower/pull/4040), " -"[#3986](https://github.com/adap/flower/pull/3986), " -"[#4026](https://github.com/adap/flower/pull/4026), " -"[#3961](https://github.com/adap/flower/pull/3961), " -"[#3975](https://github.com/adap/flower/pull/3975), " -"[#3983](https://github.com/adap/flower/pull/3983), " -"[#4091](https://github.com/adap/flower/pull/4091), " -"[#3982](https://github.com/adap/flower/pull/3982), " -"[#4079](https://github.com/adap/flower/pull/4079), " -"[#4073](https://github.com/adap/flower/pull/4073), " -"[#4060](https://github.com/adap/flower/pull/4060), " -"[#4106](https://github.com/adap/flower/pull/4106), " -"[#4080](https://github.com/adap/flower/pull/4080), " -"[#3974](https://github.com/adap/flower/pull/3974), " -"[#3996](https://github.com/adap/flower/pull/3996), " -"[#3991](https://github.com/adap/flower/pull/3991), " -"[#3981](https://github.com/adap/flower/pull/3981), " -"[#4093](https://github.com/adap/flower/pull/4093), " -"[#4100](https://github.com/adap/flower/pull/4100), " -"[#3939](https://github.com/adap/flower/pull/3939), " -"[#3955](https://github.com/adap/flower/pull/3955), " -"[#3940](https://github.com/adap/flower/pull/3940), " -"[#4038](https://github.com/adap/flower/pull/4038))" -msgstr "" +"**General improvements** " 
+"([#4454](https://github.com/adap/flower/pull/4454), " +"[#4365](https://github.com/adap/flower/pull/4365), " +"[#4423](https://github.com/adap/flower/pull/4423), " +"[#4516](https://github.com/adap/flower/pull/4516), " +"[#4509](https://github.com/adap/flower/pull/4509), " +"[#4498](https://github.com/adap/flower/pull/4498), " +"[#4371](https://github.com/adap/flower/pull/4371), " +"[#4449](https://github.com/adap/flower/pull/4449), " +"[#4488](https://github.com/adap/flower/pull/4488), " +"[#4478](https://github.com/adap/flower/pull/4478), " +"[#4392](https://github.com/adap/flower/pull/4392), " +"[#4483](https://github.com/adap/flower/pull/4483), " +"[#4517](https://github.com/adap/flower/pull/4517), " +"[#4330](https://github.com/adap/flower/pull/4330), " +"[#4458](https://github.com/adap/flower/pull/4458), " +"[#4347](https://github.com/adap/flower/pull/4347), " +"[#4429](https://github.com/adap/flower/pull/4429), " +"[#4463](https://github.com/adap/flower/pull/4463), " +"[#4496](https://github.com/adap/flower/pull/4496), " +"[#4508](https://github.com/adap/flower/pull/4508), " +"[#4444](https://github.com/adap/flower/pull/4444), " +"[#4417](https://github.com/adap/flower/pull/4417), " +"[#4504](https://github.com/adap/flower/pull/4504), " +"[#4418](https://github.com/adap/flower/pull/4418), " +"[#4480](https://github.com/adap/flower/pull/4480), " +"[#4455](https://github.com/adap/flower/pull/4455), " +"[#4468](https://github.com/adap/flower/pull/4468), " +"[#4385](https://github.com/adap/flower/pull/4385), " +"[#4487](https://github.com/adap/flower/pull/4487), " +"[#4393](https://github.com/adap/flower/pull/4393), " +"[#4489](https://github.com/adap/flower/pull/4489), " +"[#4389](https://github.com/adap/flower/pull/4389), " +"[#4507](https://github.com/adap/flower/pull/4507), " +"[#4469](https://github.com/adap/flower/pull/4469), " +"[#4340](https://github.com/adap/flower/pull/4340), " +"[#4353](https://github.com/adap/flower/pull/4353), " 
+"[#4494](https://github.com/adap/flower/pull/4494), " +"[#4461](https://github.com/adap/flower/pull/4461), " +"[#4362](https://github.com/adap/flower/pull/4362), " +"[#4473](https://github.com/adap/flower/pull/4473), " +"[#4405](https://github.com/adap/flower/pull/4405), " +"[#4416](https://github.com/adap/flower/pull/4416), " +"[#4453](https://github.com/adap/flower/pull/4453), " +"[#4491](https://github.com/adap/flower/pull/4491), " +"[#4539](https://github.com/adap/flower/pull/4539), " +"[#4542](https://github.com/adap/flower/pull/4542), " +"[#4538](https://github.com/adap/flower/pull/4538), " +"[#4543](https://github.com/adap/flower/pull/4543), " +"[#4541](https://github.com/adap/flower/pull/4541), " +"[#4550](https://github.com/adap/flower/pull/4550), " +"[#4481](https://github.com/adap/flower/pull/4481))" +msgstr "" + +#: ../../source/ref-changelog.md:104 ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:420 ../../source/ref-changelog.md:512 +#: ../../source/ref-changelog.md:1495 +msgid "Deprecations" +msgstr "停用" + +#: ../../source/ref-changelog.md:106 +#, fuzzy +msgid "**Deprecate Python 3.9**" +msgstr "** 过时的 Python 3.7**" -#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:108 msgid "" -"As always, many parts of the Flower framework and quality infrastructure " -"were improved and updated." +"Flower is deprecating support for Python 3.9 as several of its " +"dependencies are phasing out compatibility with this version. While no " +"immediate changes have been made, users are encouraged to plan for " +"upgrading to a supported Python version." 
msgstr "" -#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 -#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 -msgid "Deprecations" -msgstr "停用" +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:200 +#: ../../source/ref-changelog.md:234 ../../source/ref-changelog.md:314 +#: ../../source/ref-changelog.md:430 ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:600 ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:787 ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:941 ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:1068 ../../source/ref-changelog.md:1130 +#: ../../source/ref-changelog.md:1149 ../../source/ref-changelog.md:1305 +#: ../../source/ref-changelog.md:1376 ../../source/ref-changelog.md:1413 +#: ../../source/ref-changelog.md:1456 +msgid "Incompatible changes" +msgstr "不兼容的更改" -#: ../../source/ref-changelog.md:102 +#: ../../source/ref-changelog.md:112 #, fuzzy msgid "" -"**Deprecate accessing `Context` via `Client.context`** " -"([#3797](https://github.com/adap/flower/pull/3797))" -msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" +"**Remove `flower-superexec` command** " +"([#4351](https://github.com/adap/flower/pull/4351))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:114 msgid "" -"Now that both `client_fn` and `server_fn` receive a `Context` object, " -"accessing `Context` via `Client.context` is deprecated. `Client.context` " -"will be removed in a future release. If you need to access `Context` in " -"your `Client` implementation, pass it manually when creating the `Client`" -" instance in `client_fn`:" +"The `flower-superexec` command, previously used to launch SuperExec, is " +"no longer functional as SuperExec has been merged into SuperLink. 
" +"Starting an additional SuperExec is no longer necessary when SuperLink is" +" initiated." msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:116 #, fuzzy msgid "" -"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" -" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " -"[#4077](https://github.com/adap/flower/pull/4077), " -"[#3850](https://github.com/adap/flower/pull/3850))" -msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"**Remove `flower-server-app` command** " +"([#4490](https://github.com/adap/flower/pull/4490))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:118 msgid "" -"The CLI commands `flower-supernode` and `flower-server-app` now accept an" -" app directory as argument (instead of references to a `ClientApp` or " -"`ServerApp`). An app directory is any directory containing a " -"`pyproject.toml` file (with the appropriate Flower config fields set). " -"The easiest way to generate a compatible project structure is to use " -"`flwr new`." +"The `flower-server-app` command has been removed. To start a Flower app, " +"please use the `flwr run` command instead." msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:120 #, fuzzy msgid "" -"**Disable** `flower-client-app` **CLI command** " -"([#4022](https://github.com/adap/flower/pull/4022))" -msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**Remove `app` argument from `flower-supernode` command** " +"([#4497](https://github.com/adap/flower/pull/4497))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:119 -msgid "`flower-client-app` has been disabled. 
Use `flower-supernode` instead." +#: ../../source/ref-changelog.md:122 +msgid "" +"The usage of `flower-supernode ` has been removed. SuperNode " +"will now load the FAB delivered by SuperLink, and it is no longer " +"possible to directly specify an app directory." msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:124 #, fuzzy msgid "" -"**Use spaces instead of commas for separating config args** " -"([#4000](https://github.com/adap/flower/pull/4000))" -msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" +"**Remove support for non-app simulations** " +"([#4431](https://github.com/adap/flower/pull/4431))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:126 msgid "" -"When passing configs (run config, node config) to Flower, you now need to" -" separate key-value pairs using spaces instead of commas. For example:" +"The simulation engine (via `flower-simulation`) now exclusively supports " +"passing an app." msgstr "" -#: ../../source/ref-changelog.md:129 -msgid "Previously, you could pass configs using commas, like this:" +#: ../../source/ref-changelog.md:128 +#, fuzzy +msgid "" +"**Rename CLI arguments for `flower-superlink` command** " +"([#4412](https://github.com/adap/flower/pull/4412))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" + +#: ../../source/ref-changelog.md:130 +msgid "" +"The `--driver-api-address` argument has been renamed to `--serverappio-" +"api-address` in the `flower-superlink` command to reflect the renaming of" +" the `Driver` service to the `ServerAppIo` service." 
msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:132 #, fuzzy msgid "" -"**Remove** `flwr example` **CLI command** " -"([#4084](https://github.com/adap/flower/pull/4084))" +"**Rename CLI arguments for `flwr-serverapp` and `flwr-clientapp` " +"commands** ([#4495](https://github.com/adap/flower/pull/4495))" msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:134 msgid "" -"The experimental `flwr example` CLI command has been removed. Use `flwr " -"new` to generate a project and then run it using `flwr run`." +"The CLI arguments have been renamed for clarity and consistency. " +"Specifically, `--superlink` for `flwr-serverapp` is now `--serverappio-" +"api-address`, and `--supernode` for `flwr-clientapp` is now " +"`--clientappio-api-address`." msgstr "" -#: ../../source/ref-changelog.md:139 +#: ../../source/ref-changelog.md:136 #, fuzzy -msgid "v1.10.0 (2024-07-24)" -msgstr "v1.0.0 (2022-07-28)" +msgid "v1.12.0 (2024-10-14)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:142 #, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " -"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " -"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " +"`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel" +" J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, " +"`Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan " +"Gao`, `xiliguguagua` " msgstr "" "`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " "`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " @@ -19045,19 +18895,41 @@ msgstr "" "Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:146 #, fuzzy msgid "" -"**Introduce** `flwr run` **(beta)** " -"([#3810](https://github.com/adap/flower/pull/3810), " -"[#3826](https://github.com/adap/flower/pull/3826), " -"[#3880](https://github.com/adap/flower/pull/3880), " -"[#3807](https://github.com/adap/flower/pull/3807), " -"[#3800](https://github.com/adap/flower/pull/3800), " -"[#3814](https://github.com/adap/flower/pull/3814), " -"[#3811](https://github.com/adap/flower/pull/3811), " -"[#3809](https://github.com/adap/flower/pull/3809), " -"[#3819](https://github.com/adap/flower/pull/3819))" +"**Introduce SuperExec log streaming** " +"([#3577](https://github.com/adap/flower/pull/3577), " +"[#3584](https://github.com/adap/flower/pull/3584), " +"[#4242](https://github.com/adap/flower/pull/4242), " +"[#3611](https://github.com/adap/flower/pull/3611), " +"[#3613](https://github.com/adap/flower/pull/3613))" +msgstr "" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" + +#: ../../source/ref-changelog.md:148 +msgid "" +"Flower now supports log streaming from a remote SuperExec using the `flwr" +" log` command. This new feature allows you to monitor logs from SuperExec" +" in real time via `flwr log ` (or `flwr log " +"`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:150 +#, fuzzy +msgid "" +"**Improve `flwr new` templates** " +"([#4291](https://github.com/adap/flower/pull/4291), " +"[#4292](https://github.com/adap/flower/pull/4292), " +"[#4293](https://github.com/adap/flower/pull/4293), " +"[#4294](https://github.com/adap/flower/pull/4294), " +"[#4295](https://github.com/adap/flower/pull/4295))" msgstr "" "**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " "[#1470](https://github.com/adap/flower/pull/1470), " @@ -19066,123 +18938,173 @@ msgstr "" "[#1474](https://github.com/adap/flower/pull/1474), " "[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:152 msgid "" -"Flower 1.10 ships the first beta release of the new `flwr run` command. " -"`flwr run` can run different projects using `flwr run path/to/project`, " -"it enables you to easily switch between different federations using `flwr" -" run . federation` and it runs your Flower project using either local " -"simulation or the new (experimental) SuperExec service. This allows " -"Flower to scale federatated learning from fast local simulation to large-" -"scale production deployment, seamlessly. All projects generated with " -"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " -"`flwr new` to generate a project and then run it using `flwr run`." +"The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and " +"PyTorch have been updated to improve usability and consistency across " +"frameworks." 
msgstr "" -#: ../../source/ref-changelog.md:153 +#: ../../source/ref-changelog.md:154 #, fuzzy msgid "" -"**Introduce run config** " -"([#3751](https://github.com/adap/flower/pull/3751), " -"[#3750](https://github.com/adap/flower/pull/3750), " -"[#3845](https://github.com/adap/flower/pull/3845), " -"[#3824](https://github.com/adap/flower/pull/3824), " -"[#3746](https://github.com/adap/flower/pull/3746), " -"[#3728](https://github.com/adap/flower/pull/3728), " -"[#3730](https://github.com/adap/flower/pull/3730), " -"[#3725](https://github.com/adap/flower/pull/3725), " -"[#3729](https://github.com/adap/flower/pull/3729), " -"[#3580](https://github.com/adap/flower/pull/3580), " -"[#3578](https://github.com/adap/flower/pull/3578), " -"[#3576](https://github.com/adap/flower/pull/3576), " -"[#3798](https://github.com/adap/flower/pull/3798), " -"[#3732](https://github.com/adap/flower/pull/3732), " -"[#3815](https://github.com/adap/flower/pull/3815))" +"**Migrate ID handling to use unsigned 64-bit integers** " +"([#4170](https://github.com/adap/flower/pull/4170), " +"[#4237](https://github.com/adap/flower/pull/4237), " +"[#4243](https://github.com/adap/flower/pull/4243))" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:156 msgid "" -"The new run config feature allows you to run your Flower project in " -"different configurations without having to change a single line of 
code. " -"You can now build a configurable `ServerApp` and `ClientApp` that read " -"configuration values at runtime. This enables you to specify config " -"values like `learning-rate=0.01` in `pyproject.toml` (under the " -"`[tool.flwr.app.config]` key). These config values can then be easily " -"overridden via `flwr run --run-config learning-rate=0.02`, and read from " -"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " -"new project using `flwr new` to see run config in action." +"Node IDs, run IDs, and related fields have been migrated from signed " +"64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To " +"support this change, the `uint64` type is fully supported in all " +"communications. You may now use `uint64` values in config and metric " +"dictionaries. For Python users, that means using `int` values larger than" +" the maximum value of `sint64` but less than the maximum value of " +"`uint64`." msgstr "" -#: ../../source/ref-changelog.md:157 +#: ../../source/ref-changelog.md:158 #, fuzzy msgid "" -"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " -"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " -"[#3697](https://github.com/adap/flower/pull/3697), " -"[#3694](https://github.com/adap/flower/pull/3694), " -"[#3696](https://github.com/adap/flower/pull/3696))" +"**Add Flower architecture explanation** " +"([#3270](https://github.com/adap/flower/pull/3270))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" + +#: ../../source/ref-changelog.md:160 +msgid "" +"A new [Flower architecture explainer](https://flower.ai/docs/framework" +"/explanation-flower-architecture.html) page introduces Flower components " +"step-by-step. Check out the `EXPLANATIONS` section of the Flower " +"documentation if you're interested." 
msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:162 +#, fuzzy msgid "" -"The `client_fn` signature has been generalized to `client_fn(context: " -"Context) -> Client`. It now receives a `Context` object instead of the " -"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " -"`node_config` and `run_config`, among other things. This enables you to " -"build a configurable `ClientApp` that leverages the new run config " -"system." +"**Introduce FedRep baseline** " +"([#3790](https://github.com/adap/flower/pull/3790))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" + +#: ../../source/ref-changelog.md:164 +msgid "" +"FedRep is a federated learning algorithm that learns shared data " +"representations across clients while allowing each to maintain " +"personalized local models, balancing collaboration and individual " +"adaptation. 
Read all the details in the paper: \"Exploiting Shared " +"Representations for Personalized Federated Learning\" " +"([arxiv](https://arxiv.org/abs/2102.07078))" +msgstr "" + +#: ../../source/ref-changelog.md:166 +#, fuzzy +msgid "" +"**Improve FlowerTune template and LLM evaluation pipelines** " +"([#4286](https://github.com/adap/flower/pull/4286), " +"[#3769](https://github.com/adap/flower/pull/3769), " +"[#4272](https://github.com/adap/flower/pull/4272), " +"[#4257](https://github.com/adap/flower/pull/4257), " +"[#4220](https://github.com/adap/flower/pull/4220), " +"[#4282](https://github.com/adap/flower/pull/4282), " +"[#4171](https://github.com/adap/flower/pull/4171), " +"[#4228](https://github.com/adap/flower/pull/4228), " +"[#4258](https://github.com/adap/flower/pull/4258), " +"[#4296](https://github.com/adap/flower/pull/4296), " +"[#4287](https://github.com/adap/flower/pull/4287), " +"[#4217](https://github.com/adap/flower/pull/4217), " +"[#4249](https://github.com/adap/flower/pull/4249), " +"[#4324](https://github.com/adap/flower/pull/4324), " +"[#4219](https://github.com/adap/flower/pull/4219), " +"[#4327](https://github.com/adap/flower/pull/4327))" msgstr "" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:168 msgid "" -"The previous signature `client_fn(cid: str)` is now deprecated and " -"support for it will be removed in a future release. 
Use " -"`client_fn(context: Context) -> Client` everywhere." +"Refined evaluation pipelines, metrics, and documentation for the upcoming" +" FlowerTune LLM Leaderboard across multiple domains including Finance, " +"Medical, and general NLP. Stay tuned for the official launch—we welcome " +"all federated learning and LLM enthusiasts to participate in this " +"exciting challenge!" msgstr "" -#: ../../source/ref-changelog.md:163 +#: ../../source/ref-changelog.md:170 #, fuzzy msgid "" -"**Introduce new** `server_fn(context)` " -"([#3773](https://github.com/adap/flower/pull/3773), " -"[#3796](https://github.com/adap/flower/pull/3796), " -"[#3771](https://github.com/adap/flower/pull/3771))" +"**Enhance Docker Support and Documentation** " +"([#4191](https://github.com/adap/flower/pull/4191), " +"[#4251](https://github.com/adap/flower/pull/4251), " +"[#4190](https://github.com/adap/flower/pull/4190), " +"[#3928](https://github.com/adap/flower/pull/3928), " +"[#4298](https://github.com/adap/flower/pull/4298), " +"[#4192](https://github.com/adap/flower/pull/4192), " +"[#4136](https://github.com/adap/flower/pull/4136), " +"[#4187](https://github.com/adap/flower/pull/4187), " +"[#4261](https://github.com/adap/flower/pull/4261), " +"[#4177](https://github.com/adap/flower/pull/4177), " +"[#4176](https://github.com/adap/flower/pull/4176), " +"[#4189](https://github.com/adap/flower/pull/4189), " +"[#4297](https://github.com/adap/flower/pull/4297), " +"[#4226](https://github.com/adap/flower/pull/4226))" msgstr "" -"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584)" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " 
+"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:165 +#: ../../source/ref-changelog.md:172 msgid "" -"In addition to the new `client_fn(context:Context)`, a new " -"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" -" `ServerApp` (instead of passing, for example, `Strategy`, directly). " -"This enables you to leverage the full `Context` on the server-side to " -"build a configurable `ServerApp`." +"Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images," +" and comprehensively updated [Docker " +"documentation](https://flower.ai/docs/framework/docker/index.html) " +"including quickstart guides and distributed Docker Compose instructions." msgstr "" -#: ../../source/ref-changelog.md:167 +#: ../../source/ref-changelog.md:174 #, fuzzy msgid "" -"**Relaunch all** `flwr new` **templates** " -"([#3877](https://github.com/adap/flower/pull/3877), " -"[#3821](https://github.com/adap/flower/pull/3821), " -"[#3587](https://github.com/adap/flower/pull/3587), " -"[#3795](https://github.com/adap/flower/pull/3795), " -"[#3875](https://github.com/adap/flower/pull/3875), " -"[#3859](https://github.com/adap/flower/pull/3859), " -"[#3760](https://github.com/adap/flower/pull/3760))" +"**Introduce Flower glossary** " +"([#4165](https://github.com/adap/flower/pull/4165), " +"[#4235](https://github.com/adap/flower/pull/4235))" +msgstr "" +"**介绍 Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" + +#: ../../source/ref-changelog.md:176 +msgid "" +"Added the [Federated Learning glossary](https://flower.ai/glossary/) to " +"the Flower repository, located under the `flower/glossary/` directory. " +"This resource aims to provide clear definitions and explanations of key " +"FL concepts. 
Community contributions are highly welcomed to help expand " +"and refine this knowledge base — this is probably the easiest way to " +"become a Flower contributor!" +msgstr "" + +#: ../../source/ref-changelog.md:178 +#, fuzzy +msgid "" +"**Implement Message Time-to-Live (TTL)** " +"([#3620](https://github.com/adap/flower/pull/3620), " +"[#3596](https://github.com/adap/flower/pull/3596), " +"[#3615](https://github.com/adap/flower/pull/3615), " +"[#3609](https://github.com/adap/flower/pull/3609), " +"[#3635](https://github.com/adap/flower/pull/3635))" msgstr "" "** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " "[#1628](https://github.com/adap/flower/pull/1628), " @@ -19192,512 +19114,520 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:169 +#: ../../source/ref-changelog.md:180 msgid "" -"All `flwr new` templates have been significantly updated to showcase new " -"Flower features and best practices. This includes using `flwr run` and " -"the new run config feature. You can now easily create a new project using" -" `flwr new` and, after following the instructions to install it, `flwr " -"run` it." +"Added comprehensive TTL support for messages in Flower's SuperLink. " +"Messages are now automatically expired and cleaned up based on " +"configurable TTL values, available through the low-level API (and used by" +" default in the high-level API)." 
msgstr "" -#: ../../source/ref-changelog.md:171 +#: ../../source/ref-changelog.md:182 #, fuzzy msgid "" -"**Introduce** `flower-supernode` **(preview)** " -"([#3353](https://github.com/adap/flower/pull/3353))" +"**Improve FAB handling** " +"([#4303](https://github.com/adap/flower/pull/4303), " +"[#4264](https://github.com/adap/flower/pull/4264), " +"[#4305](https://github.com/adap/flower/pull/4305), " +"[#4304](https://github.com/adap/flower/pull/4304))" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:184 msgid "" -"The new `flower-supernode` CLI is here to replace `flower-client-app`. " -"`flower-supernode` brings full multi-app support to the Flower client-" -"side. It also allows to pass `--node-config` to the SuperNode, which is " -"accessible in your `ClientApp` via `Context` (using the new " -"`client_fn(context: Context)` signature)." +"An 8-character hash is now appended to the FAB file name. The `flwr " +"install` command installs FABs with a more flattened folder structure, " +"reducing it from 3 levels to 1." 
msgstr "" -#: ../../source/ref-changelog.md:175 +#: ../../source/ref-changelog.md:186 #, fuzzy msgid "" -"**Introduce node config** " -"([#3782](https://github.com/adap/flower/pull/3782), " -"[#3780](https://github.com/adap/flower/pull/3780), " -"[#3695](https://github.com/adap/flower/pull/3695), " -"[#3886](https://github.com/adap/flower/pull/3886))" +"**Update documentation** " +"([#3341](https://github.com/adap/flower/pull/3341), " +"[#3338](https://github.com/adap/flower/pull/3338), " +"[#3927](https://github.com/adap/flower/pull/3927), " +"[#4152](https://github.com/adap/flower/pull/4152), " +"[#4151](https://github.com/adap/flower/pull/4151), " +"[#3993](https://github.com/adap/flower/pull/3993))" msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:177 +#: ../../source/ref-changelog.md:188 msgid "" -"A new node config feature allows you to pass a static configuration to " -"the SuperNode. This configuration is read-only and available to every " -"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " -"config via `Context` (`context.node_config`)." +"Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging " +"Face, Fastai) to use the new `flwr run` command and removed default title" +" from documentation base template. A new blockchain example has been " +"added to FAQ." 
msgstr "" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:190 +#, fuzzy msgid "" -"**Introduce SuperExec (experimental)** " -"([#3605](https://github.com/adap/flower/pull/3605), " -"[#3723](https://github.com/adap/flower/pull/3723), " -"[#3731](https://github.com/adap/flower/pull/3731), " -"[#3589](https://github.com/adap/flower/pull/3589), " -"[#3604](https://github.com/adap/flower/pull/3604), " -"[#3622](https://github.com/adap/flower/pull/3622), " -"[#3838](https://github.com/adap/flower/pull/3838), " -"[#3720](https://github.com/adap/flower/pull/3720), " -"[#3606](https://github.com/adap/flower/pull/3606), " -"[#3602](https://github.com/adap/flower/pull/3602), " -"[#3603](https://github.com/adap/flower/pull/3603), " -"[#3555](https://github.com/adap/flower/pull/3555), " -"[#3808](https://github.com/adap/flower/pull/3808), " -"[#3724](https://github.com/adap/flower/pull/3724), " -"[#3658](https://github.com/adap/flower/pull/3658), " -"[#3629](https://github.com/adap/flower/pull/3629))" +"**Update example projects** " +"([#3716](https://github.com/adap/flower/pull/3716), " +"[#4007](https://github.com/adap/flower/pull/4007), " +"[#4130](https://github.com/adap/flower/pull/4130), " +"[#4234](https://github.com/adap/flower/pull/4234), " +"[#4206](https://github.com/adap/flower/pull/4206), " +"[#4188](https://github.com/adap/flower/pull/4188), " +"[#4247](https://github.com/adap/flower/pull/4247), " +"[#4331](https://github.com/adap/flower/pull/4331))" msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:192 msgid "" -"This is the first experimental release of Flower SuperExec, a 
new service" -" that executes your runs. It's not ready for production deployment just " -"yet, but don't hesitate to give it a try if you're interested." +"Refreshed multiple example projects including vertical FL, PyTorch " +"(advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized " +"Hugging Face quickstart with a smaller language model and removed legacy " +"simulation examples." msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:194 #, fuzzy msgid "" -"**Add new federated learning with tabular data example** " -"([#3568](https://github.com/adap/flower/pull/3568))" +"**Update translations** " +"([#4070](https://github.com/adap/flower/pull/4070), " +"[#4316](https://github.com/adap/flower/pull/4316), " +"[#4252](https://github.com/adap/flower/pull/4252), " +"[#4256](https://github.com/adap/flower/pull/4256), " +"[#4210](https://github.com/adap/flower/pull/4210), " +"[#4263](https://github.com/adap/flower/pull/4263), " +"[#4259](https://github.com/adap/flower/pull/4259))" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:196 msgid "" -"A new code example exemplifies a federated learning setup using the " -"Flower framework on the Adult Census Income tabular dataset." 
+"**General improvements** " +"([#4239](https://github.com/adap/flower/pull/4239), " +"[4276](https://github.com/adap/flower/pull/4276), " +"[4204](https://github.com/adap/flower/pull/4204), " +"[4184](https://github.com/adap/flower/pull/4184), " +"[4227](https://github.com/adap/flower/pull/4227), " +"[4183](https://github.com/adap/flower/pull/4183), " +"[4202](https://github.com/adap/flower/pull/4202), " +"[4250](https://github.com/adap/flower/pull/4250), " +"[4267](https://github.com/adap/flower/pull/4267), " +"[4246](https://github.com/adap/flower/pull/4246), " +"[4240](https://github.com/adap/flower/pull/4240), " +"[4265](https://github.com/adap/flower/pull/4265), " +"[4238](https://github.com/adap/flower/pull/4238), " +"[4275](https://github.com/adap/flower/pull/4275), " +"[4318](https://github.com/adap/flower/pull/4318), " +"[#4178](https://github.com/adap/flower/pull/4178), " +"[#4315](https://github.com/adap/flower/pull/4315), " +"[#4241](https://github.com/adap/flower/pull/4241), " +"[#4289](https://github.com/adap/flower/pull/4289), " +"[#4290](https://github.com/adap/flower/pull/4290), " +"[#4181](https://github.com/adap/flower/pull/4181), " +"[#4208](https://github.com/adap/flower/pull/4208), " +"[#4225](https://github.com/adap/flower/pull/4225), " +"[#4314](https://github.com/adap/flower/pull/4314), " +"[#4174](https://github.com/adap/flower/pull/4174), " +"[#4203](https://github.com/adap/flower/pull/4203), " +"[#4274](https://github.com/adap/flower/pull/4274), " +"[#3154](https://github.com/adap/flower/pull/3154), " +"[#4201](https://github.com/adap/flower/pull/4201), " +"[#4268](https://github.com/adap/flower/pull/4268), " +"[#4254](https://github.com/adap/flower/pull/4254), " +"[#3990](https://github.com/adap/flower/pull/3990), " +"[#4212](https://github.com/adap/flower/pull/4212), " +"[#2938](https://github.com/adap/flower/pull/2938), " +"[#4205](https://github.com/adap/flower/pull/4205), " +"[#4222](https://github.com/adap/flower/pull/4222), " 
+"[#4313](https://github.com/adap/flower/pull/4313), " +"[#3936](https://github.com/adap/flower/pull/3936), " +"[#4278](https://github.com/adap/flower/pull/4278), " +"[#4319](https://github.com/adap/flower/pull/4319), " +"[#4332](https://github.com/adap/flower/pull/4332), " +"[#4333](https://github.com/adap/flower/pull/4333))" +msgstr "" + +#: ../../source/ref-changelog.md:202 +#, fuzzy +msgid "" +"**Drop Python 3.8 support and update minimum version to 3.9** " +"([#4180](https://github.com/adap/flower/pull/4180), " +"[#4213](https://github.com/adap/flower/pull/4213), " +"[#4193](https://github.com/adap/flower/pull/4193), " +"[#4199](https://github.com/adap/flower/pull/4199), " +"[#4196](https://github.com/adap/flower/pull/4196), " +"[#4195](https://github.com/adap/flower/pull/4195), " +"[#4198](https://github.com/adap/flower/pull/4198), " +"[#4194](https://github.com/adap/flower/pull/4194))" msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:187 -#, fuzzy +#: ../../source/ref-changelog.md:204 msgid "" -"**Create generic adapter layer (preview)** " -"([#3538](https://github.com/adap/flower/pull/3538), " -"[#3536](https://github.com/adap/flower/pull/3536), " -"[#3540](https://github.com/adap/flower/pull/3540))" +"Python 3.8 support was deprecated in Flower 1.9, and this release removes" +" support. Flower now requires Python 3.9 or later (Python 3.11 is " +"recommended). CI and documentation were updated to use Python 3.9 as the " +"minimum supported version. Flower now supports Python 3.9 to 3.12." 
msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:189 -msgid "" -"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" -" with Flower in a transparent way. This makes Flower more modular and " -"allows for integration into other federated learning solutions and " -"platforms." -msgstr "" +#: ../../source/ref-changelog.md:206 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:212 #, fuzzy msgid "" -"**Refactor Flower Simulation Engine** " -"([#3581](https://github.com/adap/flower/pull/3581), " -"[#3471](https://github.com/adap/flower/pull/3471), " -"[#3804](https://github.com/adap/flower/pull/3804), " -"[#3468](https://github.com/adap/flower/pull/3468), " -"[#3839](https://github.com/adap/flower/pull/3839), " -"[#3806](https://github.com/adap/flower/pull/3806), " -"[#3861](https://github.com/adap/flower/pull/3861), " -"[#3543](https://github.com/adap/flower/pull/3543), " -"[#3472](https://github.com/adap/flower/pull/3472), " -"[#3829](https://github.com/adap/flower/pull/3829), " -"[#3469](https://github.com/adap/flower/pull/3469))" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " + +#: ../../source/ref-changelog.md:214 +#, fuzzy +msgid "Improvements" +msgstr "可选的改进措施" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-changelog.md:216 +#, fuzzy msgid "" -"The Simulation Engine was significantly refactored. This results in " -"faster and more stable simulations. It is also the foundation for " -"upcoming changes that aim to provide the next level of performance and " -"configurability in federated learning simulations." +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-changelog.md:218 #, fuzzy msgid "" -"**Optimize Docker containers** " -"([#3591](https://github.com/adap/flower/pull/3591))" -msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:197 +#: ../../source/ref-changelog.md:220 +#, fuzzy msgid "" -"Flower Docker containers were optimized and updated to use that latest " -"Flower framework features." 
-msgstr "" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:222 #, fuzzy msgid "" -"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " -"[#3789](https://github.com/adap/flower/pull/3789))" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:224 +#, fuzzy msgid "" -"Improved logging aims to be more concise and helpful to show you the " -"details you actually care about." -msgstr "" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:226 #, fuzzy msgid "" -"**Refactor framework internals** " -"([#3621](https://github.com/adap/flower/pull/3621), " -"[#3792](https://github.com/adap/flower/pull/3792), " -"[#3772](https://github.com/adap/flower/pull/3772), " -"[#3805](https://github.com/adap/flower/pull/3805), " -"[#3583](https://github.com/adap/flower/pull/3583), " -"[#3825](https://github.com/adap/flower/pull/3825), " -"[#3597](https://github.com/adap/flower/pull/3597), " -"[#3802](https://github.com/adap/flower/pull/3802), " -"[#3569](https://github.com/adap/flower/pull/3569))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " 
-"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:207 -#, fuzzy -msgid "Documentation improvements" -msgstr "可选的改进措施" - -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:228 #, fuzzy msgid "" -"**Add 🇰🇷 Korean translations** " -"([#3680](https://github.com/adap/flower/pull/3680))" -msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:230 #, fuzzy msgid "" -"**Update translations** " -"([#3586](https://github.com/adap/flower/pull/3586), " -"[#3679](https://github.com/adap/flower/pull/3679), " -"[#3570](https://github.com/adap/flower/pull/3570), " -"[#3681](https://github.com/adap/flower/pull/3681), " -"[#3617](https://github.com/adap/flower/pull/3617), " -"[#3674](https://github.com/adap/flower/pull/3674), " -"[#3671](https://github.com/adap/flower/pull/3671), " -"[#3572](https://github.com/adap/flower/pull/3572), " -"[#3631](https://github.com/adap/flower/pull/3631))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**介绍Flower Android SDK** " 
+"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:213 +#: ../../source/ref-changelog.md:232 #, fuzzy msgid "" -"**Update documentation** " -"([#3864](https://github.com/adap/flower/pull/3864), " -"[#3688](https://github.com/adap/flower/pull/3688), " -"[#3562](https://github.com/adap/flower/pull/3562), " -"[#3641](https://github.com/adap/flower/pull/3641), " -"[#3384](https://github.com/adap/flower/pull/3384), " -"[#3634](https://github.com/adap/flower/pull/3634), " -"[#3823](https://github.com/adap/flower/pull/3823), " -"[#3793](https://github.com/adap/flower/pull/3793), " -"[#3707](https://github.com/adap/flower/pull/3707))" -msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:238 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:244 +#, fuzzy msgid "" -"Updated documentation includes new install instructions for different " -"shells, a new Flower Code Examples documentation landing page, new `flwr`" -" CLI docs and an updated federated XGBoost code example." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:219 -msgid "**Deprecate** `client_fn(cid: str)`" +#: ../../source/ref-changelog.md:248 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:250 msgid "" -"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " -"This signature is now deprecated. Use the new signature " -"`client_fn(context: Context) -> Client` instead. 
The new argument " -"`context` allows accessing `node_id`, `node_config`, `run_config` and " -"other `Context` features. When running using the simulation engine (or " -"using `flower-supernode` with a custom `--node-config partition-id=...`)," -" `context.node_config[\"partition-id\"]` will return an `int` partition " -"ID that can be used with Flower Datasets to load a different partition of" -" the dataset on each simulated or deployed SuperNode." +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:223 +#: ../../source/ref-changelog.md:252 msgid "" -"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" -" `ServerApp` **directly**" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:225 -msgid "" -"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " -"is now deprecated. Instead of passing " -"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " -"pass them wrapped in a `server_fn(context: Context) -> " -"ServerAppComponents` function, like this: " -"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " -"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " -"to that, `server_fn` allows you to access `Context` (for example, to read" -" the `run_config`)." +#: ../../source/ref-changelog.md:254 +msgid "`flwr run` is all you need." 
msgstr "" -#: ../../source/ref-changelog.md:229 +#: ../../source/ref-changelog.md:256 #, fuzzy msgid "" -"**Remove support for `client_ids` in `start_simulation`** " -"([#3699](https://github.com/adap/flower/pull/3699))" -msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" +msgstr "" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. 
com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:231 +#: ../../source/ref-changelog.md:258 msgid "" -"The (rarely used) feature that allowed passing custom `client_ids` to the" -" `start_simulation` function was removed. This removal is part of a " -"bigger effort to refactor the simulation engine and unify how the Flower " -"internals work in simulation and deployment." +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: ../../source/ref-changelog.md:233 -#, fuzzy -msgid "" -"**Remove `flower-driver-api` and `flower-fleet-api`** " -"([#3418](https://github.com/adap/flower/pull/3418))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +#: ../../source/ref-changelog.md:260 +msgid "`flower-supernode` supports three `--isolation` modes:" +msgstr "" -#: ../../source/ref-changelog.md:235 +#: ../../source/ref-changelog.md:262 msgid "" -"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" -"api` were removed in an effort to streamline the SuperLink developer " -"experience. Use `flower-superlink` instead." +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: ../../source/ref-changelog.md:237 -#, fuzzy -msgid "v1.9.0 (2024-06-10)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:263 +msgid "" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." 
+msgstr "" -#: ../../source/ref-changelog.md:243 -#, fuzzy +#: ../../source/ref-changelog.md:264 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:247 +#: ../../source/ref-changelog.md:266 #, fuzzy msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " 
+"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:249 +#: ../../source/ref-changelog.md:268 msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." 
+"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: ../../source/ref-changelog.md:251 -msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +#: ../../source/ref-changelog.md:270 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -#: ../../source/ref-changelog.md:253 -#, fuzzy +#: ../../source/ref-changelog.md:271 msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" -msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" - -#: ../../source/ref-changelog.md:255 -msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. 
Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: ../../source/ref-changelog.md:257 -#, fuzzy +#: ../../source/ref-changelog.md:272 msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:259 +#: ../../source/ref-changelog.md:273 msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." 
+"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: ../../source/ref-changelog.md:261 +#: ../../source/ref-changelog.md:275 #, fuzzy msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:263 +#: ../../source/ref-changelog.md:277 msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." 
msgstr "" -#: ../../source/ref-changelog.md:265 +#: ../../source/ref-changelog.md:279 #, fuzzy msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:267 +#: ../../source/ref-changelog.md:281 msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." 
msgstr "" -#: ../../source/ref-changelog.md:269 +#: ../../source/ref-changelog.md:283 #, fuzzy msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" "**移除对 Python 3.7 的支持** " "([#2280](https://github.com/adap/flower/pull/2280), " @@ -19707,544 +19637,519 @@ msgstr "" "[#2355](https://github.com/adap/flower/pull/2355), " "[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:271 +#: ../../source/ref-changelog.md:285 msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
msgstr "" -#: ../../source/ref-changelog.md:273 +#: ../../source/ref-changelog.md:287 #, fuzzy msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:275 +#: ../../source/ref-changelog.md:289 msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -#: ../../source/ref-changelog.md:277 +#: ../../source/ref-changelog.md:291 #, fuzzy msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " 
+"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:279 +#: ../../source/ref-changelog.md:293 msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." msgstr "" -#: ../../source/ref-changelog.md:281 -#, fuzzy +#: ../../source/ref-changelog.md:295 msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " 
+"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:283 -msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +#: ../../source/ref-changelog.md:297 +msgid "Many code examples have been migrated to use new Flower APIs." msgstr "" -#: ../../source/ref-changelog.md:285 -#, fuzzy +#: ../../source/ref-changelog.md:299 msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " 
+"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:287 +#: ../../source/ref-changelog.md:305 +#, fuzzy msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
-msgstr "" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:289 +#: ../../source/ref-changelog.md:307 msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +"Now that both `client_fn` and `server_fn` receive a 
`Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:291 +#: ../../source/ref-changelog.md:316 +#, fuzzy msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:293 +#: ../../source/ref-changelog.md:318 msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " 
-"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:295 +#: ../../source/ref-changelog.md:320 +#, fuzzy msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:297 -msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +#: ../../source/ref-changelog.md:322 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
msgstr "" -#: ../../source/ref-changelog.md:299 +#: ../../source/ref-changelog.md:324 #, fuzzy msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" -msgstr "" -"`TensorFlow快速入门 (教程) `_" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:326 msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: ../../source/ref-changelog.md:303 -#, fuzzy -msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +#: ../../source/ref-changelog.md:332 +msgid "Previously, you could pass configs using commas, 
like this:" msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:305 +#: ../../source/ref-changelog.md:338 #, fuzzy -msgid "As always, Flower code examples have received many updates." -msgstr "许多 \"Flower \"代码示例得到了大幅更新。" +msgid "" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:307 +#: ../../source/ref-changelog.md:340 msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " 
-"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:311 +#: ../../source/ref-changelog.md:342 #, fuzzy -msgid "**Deprecate Python 3.8 support**" -msgstr "** 过时的 Python 3.8**" +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:313 +#: ../../source/ref-changelog.md:348 #, fuzzy msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." 
-msgstr "由于 Python 3.8 已于 2024-10-01 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" - -#: ../../source/ref-changelog.md:315 -#, fuzzy -msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:317 +#: ../../source/ref-changelog.md:352 +#, fuzzy msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." 
+"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:319 -#, fuzzy +#: ../../source/ref-changelog.md:354 msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." 
msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:321 +#: ../../source/ref-changelog.md:356 +#, fuzzy msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:325 +#: ../../source/ref-changelog.md:358 msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " 
-"[#3408](https://github.com/adap/flower/pull/3408))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-changelog.md:327 +#: ../../source/ref-changelog.md:360 +#, fuzzy msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." 
+"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:329 -#, fuzzy +#: ../../source/ref-changelog.md:362 msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." +msgstr "" -#: ../../source/ref-changelog.md:331 +#: ../../source/ref-changelog.md:364 msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." 
msgstr "" -#: ../../source/ref-changelog.md:333 +#: ../../source/ref-changelog.md:366 #, fuzzy msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/ref-changelog.md:335 +#: ../../source/ref-changelog.md:368 msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." 
msgstr "" -#: ../../source/ref-changelog.md:337 +#: ../../source/ref-changelog.md:370 #, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" +msgid "" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:343 -#, fuzzy +#: ../../source/ref-changelog.md:372 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:347 +#: ../../source/ref-changelog.md:374 #, fuzzy msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -"**介绍 Flower Next 高级应用程序接口(稳定版)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " 
-"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:349 -#, fuzzy +#: ../../source/ref-changelog.md:376 msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." 
-" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " -"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " -"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " -"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " -"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" -#: ../../source/ref-changelog.md:351 +#: ../../source/ref-changelog.md:378 #, fuzzy msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " 
+"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:353 -#, fuzzy +#: ../../source/ref-changelog.md:380 msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:382 +msgid "" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" +msgstr "" + +#: ../../source/ref-changelog.md:384 +msgid "" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " -"还提供了新的 Flower Next " -"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " -"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " -"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " -"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " -"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" -#: ../../source/ref-changelog.md:355 +#: ../../source/ref-changelog.md:386 #, fuzzy msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" + +#: ../../source/ref-changelog.md:388 +msgid "" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:357 +#: ../../source/ref-changelog.md:390 #, fuzzy msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. 
The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" + +#: ../../source/ref-changelog.md:392 +msgid "" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -"Flower Modifiers(我们称之为 " -"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " -"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" -" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" -#: ../../source/ref-changelog.md:359 +#: ../../source/ref-changelog.md:394 #, fuzzy msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " 
+"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" "**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " "[#1470](https://github.com/adap/flower/pull/1470), " @@ -20253,261 +20158,226 @@ msgstr "" "[#1474](https://github.com/adap/flower/pull/1474), " "[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:361 -#, fuzzy +#: ../../source/ref-changelog.md:396 msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." 
msgstr "" -"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" -" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " -"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" -"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" -#: ../../source/ref-changelog.md:363 +#: ../../source/ref-changelog.md:398 #, fuzzy msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" -msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:365 -#, fuzzy +#: ../../source/ref-changelog.md:400 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. 
A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" -" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" -"/explanation-differential-privacy.html) " -"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" -"differential-privacy.html) 介绍了 Flower 的使用方法。" -#: ../../source/ref-changelog.md:367 +#: ../../source/ref-changelog.md:402 #, fuzzy msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:369 -#, fuzzy +#: ../../source/ref-changelog.md:404 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. 
We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " -"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " -"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " -"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" -#: ../../source/ref-changelog.md:371 +#: ../../source/ref-changelog.md:406 #, fuzzy msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " 
-"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" + +#: ../../source/ref-changelog.md:410 +#, fuzzy +msgid "Documentation improvements" +msgstr "可选的改进措施" -#: ../../source/ref-changelog.md:373 +#: ../../source/ref-changelog.md:412 #, fuzzy msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." -msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:375 +#: ../../source/ref-changelog.md:414 #, fuzzy msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " 
+"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:377 +#: ../../source/ref-changelog.md:416 #, fuzzy msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." 
+"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " -"`ServerApp` 和 `ClientApp`。" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:379 -#, fuzzy +#: ../../source/ref-changelog.md:418 msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." +msgstr "" -#: ../../source/ref-changelog.md:381 -#, fuzzy +#: ../../source/ref-changelog.md:422 +msgid "**Deprecate** `client_fn(cid: str)`" +msgstr "" + +#: ../../source/ref-changelog.md:424 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. 
`--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `-ax-" -"retries` 和 `-max-wait-time`。最大重试次数 \"将定义客户端在放弃重新连接超级链接之前的重试次数,而 \"最大等待时间 " -"\"则定义超级节点放弃重新连接超级链接之前的等待时间。" -#: ../../source/ref-changelog.md:383 -#, fuzzy +#: ../../source/ref-changelog.md:426 msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" +msgstr "" + +#: ../../source/ref-changelog.md:428 +msgid "" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. 
`ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:385 +#: ../../source/ref-changelog.md:432 #, fuzzy msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" + +#: ../../source/ref-changelog.md:434 +msgid "" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." 
msgstr "" -"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"基准线。其他几条基准线也已更新。" -#: ../../source/ref-changelog.md:387 +#: ../../source/ref-changelog.md:436 #, fuzzy msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" + +#: ../../source/ref-changelog.md:438 +msgid "" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" -#: ../../source/ref-changelog.md:389 +#: ../../source/ref-changelog.md:440 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:446 #, fuzzy msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " -"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:391 +#: ../../source/ref-changelog.md:450 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" "** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " "[#1496](https://github.com/adap/flower/pull/1496), " @@ -20519,7927 +20389,7923 @@ msgstr "" "[#1519](https://github.com/adap/flower/pull/1519), " 
"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:393 -#, fuzzy +#: ../../source/ref-changelog.md:452 msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." +msgstr "" + +#: ../../source/ref-changelog.md:454 +msgid "" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." 
msgstr "" -"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " -"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " -"\"ClientApp\"。许多其他示例也得到了大量更新。" -#: ../../source/ref-changelog.md:395 +#: ../../source/ref-changelog.md:456 #, fuzzy msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " 
-"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " 
-"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " 
-"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " 
-"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" - -#: ../../source/ref-changelog.md:401 -#, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:407 -#, fuzzy +#: ../../source/ref-changelog.md:458 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:411 +#: ../../source/ref-changelog.md:460 #, fuzzy msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " 
-"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:413 -#, fuzzy +#: ../../source/ref-changelog.md:462 msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
msgstr "" -"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " -"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " -"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " -"RecordSet = self.context.state`。对该 `RecordSet` " -"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" -#: ../../source/ref-changelog.md:415 +#: ../../source/ref-changelog.md:464 #, fuzzy msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:417 -#, fuzzy +#: ../../source/ref-changelog.md:466 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." 
msgstr "" -"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " -"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" -#: ../../source/ref-changelog.md:419 +#: ../../source/ref-changelog.md:468 #, fuzzy msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" -msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:421 -#, fuzzy +#: ../../source/ref-changelog.md:470 msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" -"explore.github.io/mlx)的联合学习。" -#: ../../source/ref-changelog.md:423 +#: ../../source/ref-changelog.md:472 #, fuzzy msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:425 -#, fuzzy +#: ../../source/ref-changelog.md:474 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." 
+"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " -"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " -"提供了同类最佳的 XGBoost 支持。" -#: ../../source/ref-changelog.md:427 +#: ../../source/ref-changelog.md:476 #, fuzzy msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:429 -#, fuzzy +#: ../../source/ref-changelog.md:478 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." -msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" +"`RecordSet` is part of the Flower Next low-level API preview release. 
In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +msgstr "" -#: ../../source/ref-changelog.md:431 +#: ../../source/ref-changelog.md:480 #, fuzzy msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:433 -#, fuzzy +#: ../../source/ref-changelog.md:482 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." -msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
+msgstr "" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-changelog.md:484 #, fuzzy msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:437 -#, fuzzy +#: ../../source/ref-changelog.md:486 msgid "" -"The Flower server can now be run using an official Docker image. 
A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" -"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " -"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:488 #, fuzzy msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:441 -#, fuzzy +#: ../../source/ref-changelog.md:490 msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." 
+msgstr "" -#: ../../source/ref-changelog.md:443 -#, fuzzy +#: ../../source/ref-changelog.md:492 msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" +msgstr "" -#: ../../source/ref-changelog.md:445 -#, 
fuzzy +#: ../../source/ref-changelog.md:494 msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." +msgstr "" + +#: ../../source/ref-changelog.md:496 +msgid "" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" +msgstr "" + +#: ../../source/ref-changelog.md:498 +msgid "" +"As always, the Flower documentation has received many updates. 
Notable " +"new pages include:" +msgstr "" + +#: ../../source/ref-changelog.md:500 +msgid "" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:502 #, fuzzy msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." -msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" +msgstr "" +"`TensorFlow快速入门 (教程) `_" + +#: ../../source/ref-changelog.md:504 +msgid "" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" +msgstr "" -#: ../../source/ref-changelog.md:449 +#: ../../source/ref-changelog.md:506 #, fuzzy msgid "" "**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " 
+"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:451 +#: ../../source/ref-changelog.md:508 #, fuzzy -msgid "Many Flower code examples received substantial 
updates." +msgid "As always, Flower code examples have received many updates." msgstr "许多 \"Flower \"代码示例得到了大幅更新。" -#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 -msgid "**Update Flower Baselines**" -msgstr "**更新 Flower Baselines**" - -#: ../../source/ref-changelog.md:455 -#, fuzzy +#: ../../source/ref-changelog.md:510 msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " 
+"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:456 +#: ../../source/ref-changelog.md:514 #, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "**Deprecate Python 3.8 support**" +msgstr "** 过时的 Python 3.8**" -#: ../../source/ref-changelog.md:457 +#: ../../source/ref-changelog.md:516 #, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." 
+msgstr "由于 Python 3.8 已于 2024-10-01 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" -#: ../../source/ref-changelog.md:458 +#: ../../source/ref-changelog.md:518 #, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgid "" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" +msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:459 -#, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-changelog.md:520 +msgid "" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." 
+msgstr "" -#: ../../source/ref-changelog.md:460 +#: ../../source/ref-changelog.md:522 #, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" +msgstr "" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:462 -#, fuzzy +#: ../../source/ref-changelog.md:524 msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." 
msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:464 -#, fuzzy +#: ../../source/ref-changelog.md:528 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " 
-"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " 
-"[#2398](https://github.com/adap/flower/pull/2398))" -#: ../../source/ref-changelog.md:466 +#: ../../source/ref-changelog.md:530 +msgid "" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." +msgstr "" + +#: ../../source/ref-changelog.md:532 #, fuzzy msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." -msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:468 +#: ../../source/ref-changelog.md:534 +msgid "" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." 
+msgstr "" + +#: ../../source/ref-changelog.md:536 #, fuzzy msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: 
../../source/ref-changelog.md:470 -#, fuzzy +#: ../../source/ref-changelog.md:538 msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " 
-"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " 
-"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" -#: ../../source/ref-changelog.md:474 +#: ../../source/ref-changelog.md:540 #, fuzzy -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" -msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:476 +#: ../../source/ref-changelog.md:546 #, fuzzy msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " -"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " -"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " -"`start_client`。示例和文档已相应更新。" - -#: ../../source/ref-changelog.md:478 -#, fuzzy -msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" - -#: ../../source/ref-changelog.md:480 -#, fuzzy -msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." -msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:550 #, fuzzy msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +"**介绍 Flower 
Next 高级应用程序接口(稳定版)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" -#: ../../source/ref-changelog.md:484 +#: ../../source/ref-changelog.md:552 #, fuzzy msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." 
+" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " +"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " +"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " +"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " +"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" -#: ../../source/ref-changelog.md:486 +#: ../../source/ref-changelog.md:554 #, fuzzy msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:488 +#: ../../source/ref-changelog.md:556 #, fuzzy msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." 
+"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " -"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" +"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " +"还提供了新的 Flower Next " +"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " +"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " +"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " +"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " +"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" -#: ../../source/ref-changelog.md:490 +#: ../../source/ref-changelog.md:558 #, fuzzy msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" +msgstr "" +"**引入新的模拟引擎** 
([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-changelog.md:560 #, fuzzy msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." 
msgstr "" -"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " -"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" +"Flower Modifiers(我们称之为 " +"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " +"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" +" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-changelog.md:562 #, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" +msgid "" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:500 +#: ../../source/ref-changelog.md:564 #, fuzzy msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. 
The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" +" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " +"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" +"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" -#: ../../source/ref-changelog.md:504 +#: ../../source/ref-changelog.md:566 +#, fuzzy msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"** 增加对 Python 3.12 的实验支持** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " 
+"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:506 +#: ../../source/ref-changelog.md:568 #, fuzzy msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." 
msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" +" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" +"/explanation-differential-privacy.html) " +"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" +"differential-privacy.html) 介绍了 Flower 的使用方法。" -#: ../../source/ref-changelog.md:508 +#: ../../source/ref-changelog.md:570 #, fuzzy msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " -"\"示例,后者更加深入。" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:510 +#: ../../source/ref-changelog.md:572 #, fuzzy msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. 
In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." +msgstr "" +"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " +"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " +"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " +"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" -#: ../../source/ref-changelog.md:512 +#: ../../source/ref-changelog.md:574 #, fuzzy msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " -"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" -" README 中)。" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " 
+"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:514 +#: ../../source/ref-changelog.md:576 +#, fuzzy msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" -msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." +msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" -#: ../../source/ref-changelog.md:516 +#: ../../source/ref-changelog.md:578 +#, fuzzy msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:518 +#: ../../source/ref-changelog.md:580 #, fuzzy msgid 
"" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " +"`ServerApp` 和 `ClientApp`。" -#: ../../source/ref-changelog.md:520 -#, fuzzy -msgid "Add gRPC request-response capability to the Android SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" - -#: ../../source/ref-changelog.md:522 +#: ../../source/ref-changelog.md:582 #, fuzzy msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" -msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" - -#: ../../source/ref-changelog.md:524 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:584 #, fuzzy msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `--max-" +"retries` 和 `--max-wait-time`。最大重试次数 \"将定义客户端在放弃重新连接超级链接之前的重试次数,而 \"最大等待时间 " +"\"则定义超级节点放弃重新连接超级链接之前的等待时间。" 
-#: ../../source/ref-changelog.md:528 +#: ../../source/ref-changelog.md:586 #, fuzzy msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." 
+"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " -"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " -"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:530 +#: ../../source/ref-changelog.md:588 #, fuzzy msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." -msgstr "" -"为了向后兼容,`start_client()` 和 `start_numpy_client()` " -"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" - -#: ../../source/ref-changelog.md:532 -msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." 
msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"基准线。其他几条基准线也已更新。" -#: ../../source/ref-changelog.md:534 +#: ../../source/ref-changelog.md:590 #, fuzzy msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." -msgstr "" -"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " -"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" - -#: ../../source/ref-changelog.md:536 -msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr 
"" -"**添加新**\"Bulyan " -"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891)" - -#: ../../source/ref-changelog.md:538 -msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" -msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" - -#: ../../source/ref-changelog.md:540 -#, fuzzy -msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" -#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:592 #, fuzzy msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" -msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" - -#: ../../source/ref-changelog.md:548 -msgid "" -"FedProx 
([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" -msgstr "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" - -#: ../../source/ref-changelog.md:550 -msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" -msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" - -#: ../../source/ref-changelog.md:552 -msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " +"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" 
-#: ../../source/ref-changelog.md:554 +#: ../../source/ref-changelog.md:594 +#, fuzzy msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" - -#: ../../source/ref-changelog.md:556 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" - -#: ../../source/ref-changelog.md:558 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" - -#: ../../source/ref-changelog.md:560 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" -msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" - -#: ../../source/ref-changelog.md:562 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" - -#: ../../source/ref-changelog.md:564 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" - -#: ../../source/ref-changelog.md:566 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "FedWav2vec 
[#2551](https://github.com/adap/flower/pull/2551)" - -#: ../../source/ref-changelog.md:568 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:570 +#: ../../source/ref-changelog.md:596 +#, fuzzy msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " +"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " +"\"ClientApp\"。许多其他示例也得到了大量更新。" -#: ../../source/ref-changelog.md:572 +#: ../../source/ref-changelog.md:598 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" -msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " 
+"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " 
+"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" +msgstr "" +"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " 
+"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " 
+"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" -#: ../../source/ref-changelog.md:574 +#: ../../source/ref-changelog.md:604 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:610 #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " + +#: ../../source/ref-changelog.md:614 +#, fuzzy +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" +msgstr "" +"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " "[#2305](https://github.com/adap/flower/pull/2305), " "[#2307](https://github.com/adap/flower/pull/2307), " "[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" -msgstr "" -"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -" [#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446) " -"[#2561](https://github.com/adap/flower/pull/2561))" +"[#2435](https://github.com/adap/flower/pull/2435))" -#: ../../source/ref-changelog.md:576 +#: 
../../source/ref-changelog.md:616 #, fuzzy msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" +"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " +"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " +"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " +"RecordSet = self.context.state`。对该 `RecordSet` " +"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:618 #, fuzzy msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" -msgstr "" -"**改进教程** 
([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:620 #, fuzzy msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " 
-"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " 
+"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" -#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 -#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 -#: ../../source/ref-changelog.md:857 -msgid "Flower received many improvements under the hood, too many to list here." -msgstr "Flower 进行了许多改进,这里就不一一列举了。" +#: ../../source/ref-changelog.md:622 +#, fuzzy +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:624 +#, fuzzy msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" +"explore.github.io/mlx)的联合学习。" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:626 +#, fuzzy msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." 
-msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" +msgstr "" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:628 +#, fuzzy msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " +"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " +"提供了同类最佳的 XGBoost 支持。" -#: ../../source/ref-changelog.md:592 +#: ../../source/ref-changelog.md:630 +#, fuzzy msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" + +#: ../../source/ref-changelog.md:632 +#, fuzzy +msgid "" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
+msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" + +#: ../../source/ref-changelog.md:634 +#, fuzzy +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " -"`transport=\"rest\"` 来选择使用试验性 REST API。" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-changelog.md:594 -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.5.0 (2023-08-31)" +#: ../../source/ref-changelog.md:636 +#, fuzzy +msgid "" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." +msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:638 +#, fuzzy msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:640 +#, fuzzy msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " +"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:642 +#, fuzzy msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " -"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:644 +#, fuzzy msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" + +#: ../../source/ref-changelog.md:646 +#, fuzzy +msgid "" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" + +#: ../../source/ref-changelog.md:648 +#, fuzzy +msgid "" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" -"run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " 
-"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:610 +#: ../../source/ref-changelog.md:650 +#, fuzzy msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" -msgstr "" -"**重构 Flower 文档** 
([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." +msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" -#: ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:652 #, fuzzy msgid "" -"Much effort went into a completely restructured Flower docs experience. 
" -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " -"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:614 -msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" -msgstr "" -"**介绍 Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +#: ../../source/ref-changelog.md:654 +#, fuzzy +msgid "Many Flower code examples received substantial updates." 
+msgstr "许多 \"Flower \"代码示例得到了大幅更新。" -#: ../../source/ref-changelog.md:616 -msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." -msgstr "" -"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " -"和代码示例外,现在还有 iOS 快速入门教程。" +#: ../../source/ref-changelog.md:656 ../../source/ref-changelog.md:749 +msgid "**Update Flower Baselines**" +msgstr "**更新 Flower Baselines**" -#: ../../source/ref-changelog.md:618 +#: ../../source/ref-changelog.md:658 +#, fuzzy msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:620 -msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." 
-msgstr "" -"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " -"和代码示例,现在还有 Android 快速入门教程。" +#: ../../source/ref-changelog.md:659 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:622 -msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" -msgstr "" -"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " 
-"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +#: ../../source/ref-changelog.md:660 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:624 -msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." -msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" +#: ../../source/ref-changelog.md:661 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/ref-changelog.md:626 -msgid "**Deprecate Python 3.7**" -msgstr "** 过时的 Python 3.7**" +#: ../../source/ref-changelog.md:662 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:628 -msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." 
-msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +#: ../../source/ref-changelog.md:663 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:630 +#: ../../source/ref-changelog.md:665 +#, fuzzy msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," -" [#1853](https://github.com/adap/flower/pull/1853)" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:632 +#: ../../source/ref-changelog.md:667 +#, fuzzy msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." 
+"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" - -#: ../../source/ref-changelog.md:634 -msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" - -#: 
../../source/ref-changelog.md:636 +"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" + +#: ../../source/ref-changelog.md:669 +#, fuzzy msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. 
Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." -msgstr "" -"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " -"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " -"`start_driver` 的工作示例。" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." +msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" -#: ../../source/ref-changelog.md:638 +#: ../../source/ref-changelog.md:671 +#, fuzzy msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"为 `mt-pytorch` **代码示例**添加参数聚合 " -"([#1785](https://github.com/adap/flower/pull/1785))" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " 
+"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:640 +#: ../../source/ref-changelog.md:673 +#, fuzzy msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " 
+"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " -"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" +"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " 
+"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" -#: ../../source/ref-changelog.md:642 +#: ../../source/ref-changelog.md:677 +#, fuzzy msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"**将实验性 REST API 移植到 Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/ref-changelog.md:644 +#: ../../source/ref-changelog.md:679 +#, fuzzy msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. 
In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " -"[Starlette](https://www.starlette.io/) 。" +"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " +"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " +"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " +"`start_client`。示例和文档已相应更新。" -#: ../../source/ref-changelog.md:646 +#: ../../source/ref-changelog.md:681 +#, fuzzy msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:648 +#: ../../source/ref-changelog.md:683 +#, fuzzy msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" -msgstr "" -"**引入实验性 gRPC 请求-响应 API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901)" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." 
+msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" -#: ../../source/ref-changelog.md:650 +#: ../../source/ref-changelog.md:685 +#, fuzzy msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " -"应用程序接口,它使用请求-响应模型与客户端节点通信。" - -#: ../../source/ref-changelog.md:652 -msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/ref-changelog.md:654 +#: ../../source/ref-changelog.md:687 +#, fuzzy msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -"**用新的** `start_client(transport=\"rest\")` 替换实验性** " -"`start_client(rest=True)` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:656 +#: ../../source/ref-changelog.md:689 +#, fuzzy msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. 
The deprecated " -"argument `rest` will be removed in a future release." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" -" `transport`。过时的参数 `rest` 将在今后的版本中删除。" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:658 +#: ../../source/ref-changelog.md:691 +#, fuzzy msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" - -#: ../../source/ref-changelog.md:660 -msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " -"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" +"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " +"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" -#: ../../source/ref-changelog.md:662 +#: ../../source/ref-changelog.md:693 +#, fuzzy msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" - -#: ../../source/ref-changelog.md:664 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" 
+"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:666 +#: ../../source/ref-changelog.md:695 +#, fuzzy msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." 
msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " +"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" -#: ../../source/ref-changelog.md:668 -msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." -msgstr "" -"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " -"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " -"\"requirements.txt\"(除 \"pyproject.toml \"外)。" +#: ../../source/ref-changelog.md:697 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:670 +#: ../../source/ref-changelog.md:703 +#, fuzzy msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" - -#: ../../source/ref-changelog.md:678 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " -#: ../../source/ref-changelog.md:684 +#: ../../source/ref-changelog.md:707 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"** 增加对 Python 3.12 的实验支持** " +"([#2565](https://github.com/adap/flower/pull/2565))" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:709 +#, fuzzy msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " -"([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " 
+"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:711 +#, fuzzy msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" +"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " +"\"示例,后者更加深入。" -#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:713 +#, fuzzy msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:715 +#, fuzzy msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. 
We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " -"示例](https://github.com/adap/flower/tree/main/examples/ios)!" +"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " +"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" +" README 中)。" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:717 msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -msgstr "" -"**引入新的 " -"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721)" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" +msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:719 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. 
Forward it to anyone who's " -"interested in Federated Learning!" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" -"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " -"之旅。请转发给对联邦学习感兴趣的人!" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:721 +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" + +#: ../../source/ref-changelog.md:723 +#, fuzzy +msgid "Add gRPC request-response capability to the Android SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:725 +#, fuzzy msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." 
+"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " -"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " -"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" + +#: ../../source/ref-changelog.md:727 +msgid "Add gRPC request-response capability to the C++ SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:729 +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"**引入新的 Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:731 +#, fuzzy msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. 
The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " -"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" +"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " +"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " +"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:733 +#, fuzzy msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." 
msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"为了向后兼容,`start_client()` 和 `start_numpy_client()` " +"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:735 msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." -msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:737 +#, fuzzy msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." -msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
+msgstr "" +"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " +"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:739 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**添加新**\"Bulyan " +"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891)" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:741 msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). 
Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" + +#: ../../source/ref-changelog.md:743 +#, fuzzy +msgid "" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" + +#: ../../source/ref-changelog.md:745 ../../source/ref-changelog.md:747 +#, fuzzy +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " -"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " -"服务器的内存效率。" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:751 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" -msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" +msgstr "" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:753 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable 
to continue. This is now " -"fixed! 🎉" -msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" +msgstr "" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:755 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"** 添加使用** `TabNet` ** 的新示例** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:757 msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." 
+"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)。" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" + +#: ../../source/ref-changelog.md:759 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" + +#: ../../source/ref-changelog.md:761 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" + +#: ../../source/ref-changelog.md:763 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" + +#: ../../source/ref-changelog.md:765 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" + +#: ../../source/ref-changelog.md:767 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" + +#: ../../source/ref-changelog.md:769 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" + +#: ../../source/ref-changelog.md:771 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -#: ../../source/ref-changelog.md:726 +#: ../../source/ref-changelog.md:773 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " 
+"[#2615](https://github.com/adap/flower/pull/2615))" +msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:728 +#: ../../source/ref-changelog.md:775 +#, fuzzy msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." -msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" +msgstr "" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:777 +#, fuzzy msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" 
-"**在模拟过程中为***`历史`***对象添加训练指标*** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +" [#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446) " +"[#2561](https://github.com/adap/flower/pull/2561))" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:779 +#, fuzzy msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " -"\"对象中。现在可以了!" 
+"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448))" + +#: ../../source/ref-changelog.md:781 +#, fuzzy +msgid "" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:734 +#: ../../source/ref-changelog.md:783 +#, fuzzy msgid "" "**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " 
-"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " 
+"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " 
-"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " 
-"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" -#: ../../source/ref-changelog.md:742 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:785 ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:993 +#: ../../source/ref-changelog.md:1060 +msgid "Flower received many improvements under the hood, too many to list here." 
+msgstr "Flower 进行了许多改进,这里就不一一列举了。" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:789 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:791 msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" -msgstr "" -"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." +msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:793 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. 
It also supports a new"
-" `group_id` that can be used, for example, to indicate the current "
-"training round. Both the `workload_id` and `group_id` enable client nodes"
-" to decide whether they want to handle a task or not."
+"**Remove experimental argument** `rest` **from** `start_client` "
+"([#2324](https://github.com/adap/flower/pull/2324))"
 msgstr ""
-"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 "
-"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` "
-"客户端节点可以决定是否要处理某个任务。"
+"**从** `start_client` **中移除实验参数** `rest` "
+"([#2324](https://github.com/adap/flower/pull/2324))"

-#: ../../source/ref-changelog.md:756
+#: ../../source/ref-changelog.md:795
 msgid ""
-"**Make Driver API and Fleet API address configurable** "
-"([#1637](https://github.com/adap/flower/pull/1637))"
+"The (still experimental) argument `rest` was removed from `start_client` "
+"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the "
+"experimental REST API instead."
 msgstr ""
-"**使Driver API 和Fleet "
-"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))"
+"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 "
+"`transport=\"rest\"` 来选择使用试验性 REST API。"

-#: ../../source/ref-changelog.md:758
-msgid ""
-"The (experimental) long-running Flower server (Driver API and Fleet API) "
-"can now configure the server address of both Driver API (via `--driver-"
-"api-address`) and Fleet API (via `--fleet-api-address`) when starting:"
-msgstr ""
-"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 "
-"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:"
+#: ../../source/ref-changelog.md:797
+msgid "v1.5.0 (2023-08-31)"
+msgstr "v1.5.0 (2023-08-31)"

-#: ../../source/ref-changelog.md:760
-#, fuzzy
+#: ../../source/ref-changelog.md:803
 msgid ""
-"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address "
-"\"0.0.0.0:8086\"`"
+"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, "
+"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" - -#: ../../source/ref-changelog.md:762 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "支持 IPv4 和 IPv6 地址。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:807 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:809 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. 
Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)。" +"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " +"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:811 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" -"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" +"run-simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:813 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " 
+"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" -" 工具。" +"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" -#: 
../../source/ref-changelog.md:772
+#: ../../source/ref-changelog.md:815
+#, fuzzy
 msgid ""
-"**Add new `FedProx` strategy** "
-"([#1619](https://github.com/adap/flower/pull/1619))"
-msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))"
+"Much effort went into a completely restructured Flower docs experience. "
+"The documentation on [flower.ai/docs](https://flower.ai/docs) is now "
+"divided into Flower Framework, Flower Baselines, Flower Android SDK, "
+"Flower iOS SDK, and code example projects."
+msgstr ""
+"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](https://flower.ai/docs)上的文档分为 Flower "
+"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。"

-#: ../../source/ref-changelog.md:774
+#: ../../source/ref-changelog.md:817
 msgid ""
-"This "
-"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)"
-" is almost identical to "
-"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py),"
-" but helps users replicate what is described in this "
-"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a "
-"parameter called `proximal_mu` to regularize the local models with "
-"respect to the global models."
+"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" -" `proximal_mu`的参数,使局部模型与全局模型正则化。" +"**介绍 Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:819 msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." +msgstr "" +"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " +"和代码示例外,现在还有 iOS 快速入门教程。" -#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:821 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." -msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:823 msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" -msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" +"This is the first preview release of the Flower Kotlin SDK. 
Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." +msgstr "" +"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " +"和代码示例,现在还有 Android 快速入门教程。" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:825 msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " 
+"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:827 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" -msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." +msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" + +#: ../../source/ref-changelog.md:829 +msgid "**Deprecate Python 3.7**" +msgstr "** 过时的 Python 3.7**" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:831 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." 
+msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。"
+
+#: ../../source/ref-changelog.md:833
+msgid ""
+"**Add new** `FedTrimmedAvg` **strategy** "
+"([#1769](https://github.com/adap/flower/pull/1769), "
+"[#1853](https://github.com/adap/flower/pull/1853))"
 msgstr ""
-"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab "
-"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source"
-"/tutorial-customize-the-client-pytorch.ipynb)"
+"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769),"
+" [#1853](https://github.com/adap/flower/pull/1853))"

-#: ../../source/ref-changelog.md:788
+#: ../../source/ref-changelog.md:835
 msgid ""
-"**General improvements** "
-"([#1638](https://github.com/adap/flower/pull/1638), "
-"[#1634](https://github.com/adap/flower/pull/1634), "
-"[#1636](https://github.com/adap/flower/pull/1636), "
-"[#1635](https://github.com/adap/flower/pull/1635), "
-"[#1633](https://github.com/adap/flower/pull/1633), "
-"[#1632](https://github.com/adap/flower/pull/1632), "
-"[#1631](https://github.com/adap/flower/pull/1631), "
-"[#1630](https://github.com/adap/flower/pull/1630), "
-"[#1627](https://github.com/adap/flower/pull/1627), "
-"[#1593](https://github.com/adap/flower/pull/1593), "
-"[#1616](https://github.com/adap/flower/pull/1616), "
-"[#1615](https://github.com/adap/flower/pull/1615), "
-"[#1607](https://github.com/adap/flower/pull/1607), "
-"[#1609](https://github.com/adap/flower/pull/1609), "
-"[#1608](https://github.com/adap/flower/pull/1608), "
-"[#1603](https://github.com/adap/flower/pull/1603), "
-"[#1590](https://github.com/adap/flower/pull/1590), "
-"[#1580](https://github.com/adap/flower/pull/1580), "
-"[#1599](https://github.com/adap/flower/pull/1599), "
-"[#1600](https://github.com/adap/flower/pull/1600), "
-"[#1601](https://github.com/adap/flower/pull/1601), "
-"[#1597](https://github.com/adap/flower/pull/1597), "
-"[#1595](https://github.com/adap/flower/pull/1595), "
-"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github. com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github. 
com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github. com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" -#: ../../source/ref-changelog.md:792 +#: ../../source/ref-changelog.md:837 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"**Introduce start_driver** " 
+"([#1697](https://github.com/adap/flower/pull/1697))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 +#: ../../source/ref-changelog.md:839 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." +msgstr "" +"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " +"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " +"`start_driver` 的工作示例。" -#: ../../source/ref-changelog.md:800 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" +#: ../../source/ref-changelog.md:841 +msgid "" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" +msgstr "" +"为 `mt-pytorch` **代码示例**添加参数聚合 " +"([#1785](https://github.com/adap/flower/pull/1785))" -#: ../../source/ref-changelog.md:806 +#: ../../source/ref-changelog.md:843 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " +"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:845 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"**引入新的 Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**将实验性 REST API 移植到 Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:812 +#: ../../source/ref-changelog.md:847 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." 
msgstr "" -"在未来几周内,我们将发布一些新的参考,特别是对 FL " -"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " +"[Starlette](https://www.starlette.io/) 。" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:849 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." +msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:851 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." 
+"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " -"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" +"**引入实验性 gRPC 请求-响应 API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901)" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:853 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -"**改进 Jupyter Notebook 教程中的 GPU 支持** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " +"应用程序接口,它使用请求-响应模型与客户端节点通信。" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:855 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" -msgstr "" -"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " -"笔记本进行了改进!点击这里查看更新后的笔记本:" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." 
+msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:857 msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" -"flower-pytorch.html)" +"**用新的** `start_client(transport=\"rest\")` 替换实验性** " +"`start_client(rest=True)` " +"([#1880](https://github.com/adap/flower/pull/1880))" -#: ../../source/ref-changelog.md:823 +#: ../../source/ref-changelog.md:859 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
msgstr "" -"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" -"learning-strategy-pytorch.html)" +"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" +" `transport`。过时的参数 `rest` 将在今后的版本中删除。" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:861 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" -msgstr "" -"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" -"scratch-pytorch.html)" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:825 +#: ../../source/ref-changelog.md:863 msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." 
msgstr "" -"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" -"the-client-pytorch.html)" +"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " +"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" -#: ../../source/ref-changelog.md:827 +#: ../../source/ref-changelog.md:865 msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -msgstr "" -"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584)" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-changelog.md:829 +#: ../../source/ref-changelog.md:867 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" + +#: ../../source/ref-changelog.md:869 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." 
+"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " -"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:831 +#: ../../source/ref-changelog.md:871 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
+"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " -"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" +"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " +"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " +"\"requirements.txt\"(除 \"pyproject.toml \"外)。" -#: ../../source/ref-changelog.md:833 +#: ../../source/ref-changelog.md:873 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), 
" +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:835 -msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." -msgstr "" -"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " -"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " -"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" +#: ../../source/ref-changelog.md:881 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:837 +#: ../../source/ref-changelog.md:887 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" -" Flower 的用户来说尤其有用。" - -#: ../../source/ref-changelog.md:839 -msgid "" -"To learn more, check out the `mt-pytorch` code example. 
We look forward " -"to you feedback!" -msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:891 msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" -msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" +msgstr "" +"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " +"([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:843 +#: ../../source/ref-changelog.md:893 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. 
We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -"** 添加新的使用 Pandas " -"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535)" +"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" -#: ../../source/ref-changelog.md:845 +#: ../../source/ref-changelog.md:895 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" -"/quickstart-pandas)。" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:847 +#: ../../source/ref-changelog.md:897 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. 
To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -"**添加新策略: Krum 和 MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " +"示例](https://github.com/adap/flower/tree/main/examples/ios)!" -#: ../../source/ref-changelog.md:849 +#: ../../source/ref-changelog.md:899 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " -"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" +"**引入新的 " +"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721)" -#: ../../source/ref-changelog.md:851 +#: ../../source/ref-changelog.md:901 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" +"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " +"之旅。请转发给对联邦学习感兴趣的人!" 
-#: ../../source/ref-changelog.md:853 +#: ../../source/ref-changelog.md:903 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." -msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" +msgstr "" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:855 +#: ../../source/ref-changelog.md:905 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." 
msgstr "" -"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github. com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " +"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " +"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:907 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " 
-"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**引入新的 Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:909 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" -"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" - -#: ../../source/ref-changelog.md:869 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" +"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " +"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" -#: ../../source/ref-changelog.md:873 -msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" - -#: ../../source/ref-changelog.md:875 -msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" -msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" - -#: ../../source/ref-changelog.md:879 +#: ../../source/ref-changelog.md:911 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:881 +#: ../../source/ref-changelog.md:913 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." 
-msgstr "" -"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" -" Flower 文档,新的解释器会提供更多细节。" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." +msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:915 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." +msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:917 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." 
+"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " -"SDK 组件,以及在 CoreML 上运行的一个任务示例。" - -#: ../../source/ref-changelog.md:887 -msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" -msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:919 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. 
This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -"新的 \"FedMedian \"战略实现了[Yin " -"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" +"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " +"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " +"服务器的内存效率。" -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:921 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" -msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:923 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." -msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 
🎉" +msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" -#: ../../source/ref-changelog.md:895 +#: ../../source/ref-changelog.md:925 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" +msgstr "" +"** 添加使用** `TabNet` ** 的新示例** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:927 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" -"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " -"\"float\",以允许分配分数资源。" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)。" -#: ../../source/ref-changelog.md:899 +#: ../../source/ref-changelog.md:929 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" -msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:931 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." -msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" +"We now have a documentation guide to help users monitor their performance" +" during simulations." +msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" -#: ../../source/ref-changelog.md:903 +#: ../../source/ref-changelog.md:933 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -"**使用** `flwr`向软件包提供类型信息 " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:935 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." 
+"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " -"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" +"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " +"\"对象中。现在可以了!" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:937 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " 
+"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" - -#: ../../source/ref-changelog.md:909 -msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." 
-msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" - -#: ../../source/ref-changelog.md:911 -msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " 
+"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" + +#: ../../source/ref-changelog.md:945 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:951 +msgid "" +"`Adam Narozniak`, `Alexander Viala 
Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github. com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:955 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." 
-msgstr "文档更新的数量之多,甚至没有必要逐一列出。" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" +msgstr "" +"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " +"([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:957 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." +msgstr "" +"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " +"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " +"客户端节点可以决定是否要处理某个任务。" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:959 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" -msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" 
+"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" +msgstr "" +"**使Driver API 和Fleet " +"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:961 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" -msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +msgstr "" +"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " +"`--driver-api-address`)和 Fleet API(通过 `--fleet-api-address`)的服务器地址:" -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:963 +#, fuzzy msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " -"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" -#: ../../source/ref-changelog.md:923 +#: ../../source/ref-changelog.md:965 +msgid "Both IPv4 and IPv6 addresses are supported." 
+msgstr "支持 IPv4 和 IPv6 地址。" + +#: ../../source/ref-changelog.md:967 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:969 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." 
msgstr "" -"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " -"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" +"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " +"Flower 的联邦学习。您可以在这里找到它: [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)。" -#: ../../source/ref-changelog.md:931 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" +#: ../../source/ref-changelog.md:971 +msgid "" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" +msgstr "" +"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/ref-changelog.md:933 -msgid "Highlights" -msgstr "亮点" +#: ../../source/ref-changelog.md:973 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." 
+msgstr "" +"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" +" 工具。" -#: ../../source/ref-changelog.md:935 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" +#: ../../source/ref-changelog.md:975 +msgid "" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:936 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" +#: ../../source/ref-changelog.md:977 +msgid "" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." 
+msgstr "" +"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" +" `proximal_mu`的参数,使局部模型与全局模型正则化。" -#: ../../source/ref-changelog.md:937 -msgid "Configurable `get_parameters`" -msgstr "可配置的`get_parameters`" +#: ../../source/ref-changelog.md:979 +msgid "" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/ref-changelog.md:938 +#: ../../source/ref-changelog.md:981 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" -msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." +msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" -#: ../../source/ref-changelog.md:942 +#: ../../source/ref-changelog.md:983 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" + +#: ../../source/ref-changelog.md:985 +msgid "" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " -"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" +"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " 
+"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" -#: ../../source/ref-changelog.md:944 +#: ../../source/ref-changelog.md:987 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " 
-"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." -msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
- -#: ../../source/ref-changelog.md:948 -msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" -msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/ref-changelog.md:950 +#: ../../source/ref-changelog.md:989 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " -"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," -" client=FlowerClient())`)。" +"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" -#: ../../source/ref-changelog.md:952 +#: ../../source/ref-changelog.md:991 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " 
+"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " -"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" +"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " 
+"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" -#: ../../source/ref-changelog.md:954 +#: ../../source/ref-changelog.md:995 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. 
`ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " -"`start_simulation`现在用一个类型为 " -"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " -"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" - -#: ../../source/ref-changelog.md:956 -msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:997 ../../source/ref-changelog.md:1064 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" -msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" - -#: ../../source/ref-changelog.md:960 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:961 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr 
"`min_eval_clients` --> `min_evaluate_clients`" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" -#: ../../source/ref-changelog.md:962 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +#: ../../source/ref-changelog.md:1003 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:1009 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" -msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +msgstr "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:1013 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. 
Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " -"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" - -#: ../../source/ref-changelog.md:968 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +"**引入新的 Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:1015 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -"**添加*** `server_round` ***到*** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"在未来几周内,我们将发布一些新的参考,特别是对 FL " +"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:1017 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." 
-msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:1019 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " +"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/ref-changelog.md:976 +#: ../../source/ref-changelog.md:1021 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." 
+"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " -"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" +"**改进 Jupyter Notebook 教程中的 GPU 支持** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:1023 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " +"笔记本进行了改进!点击这里查看更新后的笔记本:" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:1025 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." 
+"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -"几个 Flower " -"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" -" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" +"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" +"flower-pytorch.html)" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:1026 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -"**移动*** `flwr.dataset` **到*** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" - -#: ../../source/ref-changelog.md:984 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" - -#: ../../source/ref-changelog.md:986 -msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" -msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" - -#: ../../source/ref-changelog.md:988 -msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." 
-msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" +"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" +"learning-strategy-pytorch.html)" -#: ../../source/ref-changelog.md:990 +#: ../../source/ref-changelog.md:1027 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -"**重新命名** `Weights` **到** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" +"scratch-pytorch.html)" -#: ../../source/ref-changelog.md:992 +#: ../../source/ref-changelog.md:1028 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." -msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" +"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" +"the-client-pytorch.html)" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:1030 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" 
+"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/ref-changelog.md:996 +#: ../../source/ref-changelog.md:1032 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -"start_server \"参数 \"force_final_distributed_eval " -"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" +"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " +"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/ref-changelog.md:998 +#: ../../source/ref-changelog.md:1034 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -"**使** `get_parameters` **可配置** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " +"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" -#: ../../source/ref-changelog.md:1000 +#: ../../source/ref-changelog.md:1036 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." 
+"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " -"\"一样,都接受配置字典。" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:1002 +#: ../../source/ref-changelog.md:1038 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." 
msgstr "" -"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " -"([#1281](https://github.com/adap/flower/pull/1281))" +"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " +"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " +"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" -#: ../../source/ref-changelog.md:1004 +#: ../../source/ref-changelog.md:1040 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " -"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" +"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" +" Flower 的用户来说尤其有用。" -#: ../../source/ref-changelog.md:1008 +#: ../../source/ref-changelog.md:1042 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" -#: ../../source/ref-changelog.md:1010 +#: ../../source/ref-changelog.md:1044 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." 
-msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" -#: ../../source/ref-changelog.md:1012 +#: ../../source/ref-changelog.md:1046 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"** 添加新的使用 Pandas " +"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535)" -#: ../../source/ref-changelog.md:1014 +#: ../../source/ref-changelog.md:1048 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " -"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " -"`evaluate`!" 
+"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " +"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" +"/quickstart-pandas)。" -#: ../../source/ref-changelog.md:1016 +#: ../../source/ref-changelog.md:1050 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**添加新策略: Krum 和 MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/ref-changelog.md:1018 +#: ../../source/ref-changelog.md:1052 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " -"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" +"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " +"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" -#: ../../source/ref-changelog.md:1020 +#: ../../source/ref-changelog.md:1054 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " +"([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/ref-changelog.md:1022 +#: ../../source/ref-changelog.md:1056 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" - -#: ../../source/ref-changelog.md:1024 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" - -#: ../../source/ref-changelog.md:1025 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." 
+msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" -#: ../../source/ref-changelog.md:1026 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch`" - -#: ../../source/ref-changelog.md:1027 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" - -#: ../../source/ref-changelog.md:1028 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" - -#: ../../source/ref-changelog.md:1029 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow`" - -#: ../../source/ref-changelog.md:1031 -msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" - -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:1058 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " -"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " 
+"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:1062 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " 
-"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:1066 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " -"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " -"`.md`,并修复了一些较小的细节!" +"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" +"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" 
-#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 -#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 -msgid "**Minor updates**" -msgstr "**小规模更新**" +#: ../../source/ref-changelog.md:1072 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:1076 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" -msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" -#: ../../source/ref-changelog.md:1042 +#: ../../source/ref-changelog.md:1078 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " -"([#847](https://github.com/adap/flower/pull/847))" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/ref-changelog.md:1043 +#: ../../source/ref-changelog.md:1082 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:1044 +#: ../../source/ref-changelog.md:1084 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
msgstr "" -"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259)" - -#: ../../source/ref-changelog.md:1046 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" +" Flower 文档,新的解释器会提供更多细节。" -#: ../../source/ref-changelog.md:1050 +#: ../../source/ref-changelog.md:1086 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -msgstr "" -"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:1052 -#, fuzzy +#: ../../source/ref-changelog.md:1088 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." 
msgstr "" -"Flower Baselines 的第一个预览版已经发布!我们通过实现 " -"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " -"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" -"/contributing-baselines.html)。" +"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " +"SDK 组件,以及在 CoreML 上运行的一个任务示例。" -#: ../../source/ref-changelog.md:1054 +#: ../../source/ref-changelog.md:1090 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" -msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:1092 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
msgstr "" -"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " -"演示了一个简单的 C++ 客户端。" +"新的 \"FedMedian \"战略实现了[Yin " +"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" -#: ../../source/ref-changelog.md:1058 +#: ../../source/ref-changelog.md:1094 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" -msgstr "" -"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:1096 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." -msgstr "" -"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " -"Python 版本的实验支持。" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." 
+msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" -#: ../../source/ref-changelog.md:1062 +#: ../../source/ref-changelog.md:1098 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" -msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/ref-changelog.md:1064 +#: ../../source/ref-changelog.md:1100 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
msgstr "" -"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " -"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" +"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " +"\"float\",以允许分配分数资源。" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:1102 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" -msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:1104 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." -msgstr "" -"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " -"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." 
+msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:1106 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -"**允许在所有内置策略中同时使用联邦评价和集中评估** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**使用** `flwr`向软件包提供类型信息 " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:1108 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " -"`0.0`来禁用联邦评估。" +"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " +"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:1110 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"**两本新的 Jupyter Notebook 教程** " -"([#1141](https://github.com/adap/flower/pull/1141))" - -#: ../../source/ref-changelog.md:1076 -msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" -msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:1078 +#: ../../source/ref-changelog.md:1112 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -msgstr "" -"*联邦学习简介*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/ref-changelog.md:1080 +#: ../../source/ref-changelog.md:1114 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -"*在联邦学习中使用策略*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github. 
com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/ref-changelog.md:1082 +#: ../../source/ref-changelog.md:1116 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" -msgstr "" -"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." +msgstr "文档更新的数量之多,甚至没有必要逐一列出。" -#: ../../source/ref-changelog.md:1084 +#: ../../source/ref-changelog.md:1118 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." -msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:1086 +#: ../../source/ref-changelog.md:1120 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" -msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" 
-#: ../../source/ref-changelog.md:1088 +#: ../../source/ref-changelog.md:1122 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." -msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:1090 +#: ../../source/ref-changelog.md:1124 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143)" - -#: ../../source/ref-changelog.md:1092 -msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
-msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" +"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " +"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/ref-changelog.md:1096 +#: ../../source/ref-changelog.md:1126 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " -"运行([#1177](https://github.com/adap/flower/pull/1177))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:1097 +#: ../../source/ref-changelog.md:1128 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." 
msgstr "" -"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " -"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" +"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " +"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" -#: ../../source/ref-changelog.md:1098 -msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" -msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" -" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175)" +#: ../../source/ref-changelog.md:1134 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:1099 -msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" -msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" +#: ../../source/ref-changelog.md:1136 +msgid "Highlights" +msgstr "亮点" -#: ../../source/ref-changelog.md:1100 -msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" -msgstr "" -"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +#: ../../source/ref-changelog.md:1138 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" -#: ../../source/ref-changelog.md:1104 -msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" +#: ../../source/ref-changelog.md:1139 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" -#: 
../../source/ref-changelog.md:1105 -msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +#: ../../source/ref-changelog.md:1140 +msgid "Configurable `get_parameters`" +msgstr "可配置的`get_parameters`" -#: ../../source/ref-changelog.md:1106 +#: ../../source/ref-changelog.md:1141 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" -msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" -#: ../../source/ref-changelog.md:1107 +#: ../../source/ref-changelog.md:1145 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " -"([#869](https://github.com/adap/flower/pull/869))" +"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " +"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" -#: ../../source/ref-changelog.md:1108 +#: ../../source/ref-changelog.md:1147 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " 
+"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
msgstr "" -"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
-#: ../../source/ref-changelog.md:1109 +#: ../../source/ref-changelog.md:1151 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" -msgstr "" -"**删除过时的 DefaultStrategy 策略** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "**所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/ref-changelog.md:1110 +#: ../../source/ref-changelog.md:1153 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." 
msgstr "" -"**删除已过时的对 eval_fn 返回值准确性的支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " +"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," +" client=FlowerClient())`)。" -#: ../../source/ref-changelog.md:1111 +#: ../../source/ref-changelog.md:1155 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" - -#: ../../source/ref-changelog.md:1113 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +"**在** `start_server` **和** `start_simulation` **中引入配置对象** " +"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/ref-changelog.md:1117 +#: ../../source/ref-changelog.md:1157 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." 
msgstr "" -"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " +"`start_simulation`现在用一个类型为 " +"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " +"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/ref-changelog.md:1119 +#: ../../source/ref-changelog.md:1159 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." -msgstr "" -"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " -"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation`" -" 使用虚拟客户端引擎)现在可以更流畅地运行。" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:1161 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" -msgstr "" -"**新的 Jupyter Notebook 代码示例** " -"([#833](https://github.com/adap/flower/pull/833))" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" -#: ../../source/ref-changelog.md:1123 -msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." 
-msgstr "" -"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " -"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" +#: ../../source/ref-changelog.md:1163 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" -#: ../../source/ref-changelog.md:1125 -msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" -msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" +#: ../../source/ref-changelog.md:1164 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/ref-changelog.md:1127 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." -msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" +#: ../../source/ref-changelog.md:1165 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/ref-changelog.md:1129 +#: ../../source/ref-changelog.md:1167 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:1169 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. 
Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " -"Flower 就变得更容易了。" +"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " +"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" + +#: ../../source/ref-changelog.md:1171 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:1173 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " -"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " -"`FedAvg`实现迈出的第一步。" +"**添加*** `server_round` ***到*** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:1135 +#: ../../source/ref-changelog.md:1175 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" -msgstr "" -"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." 
+msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" -#: ../../source/ref-changelog.md:1137 +#: ../../source/ref-changelog.md:1177 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " -"keepalive 时间,自定义 gRPC 堆栈。" +"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:1179 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -"**使用 Opacus 和 PyTorch 的新差分隐私示例** " -"([#805](https://github.com/adap/flower/pull/805))" +"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " +"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" -#: ../../source/ref-changelog.md:1141 +#: ../../source/ref-changelog.md:1181 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." 
-msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" +msgstr "" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:1143 +#: ../../source/ref-changelog.md:1183 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -"**新的Hugging Face Transformers代码示例** " -"([#863](https://github.com/adap/flower/pull/863))" - -#: ../../source/ref-changelog.md:1145 -msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." 
-msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" +"几个 Flower " +"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" +" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:1185 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**移动*** `flwr.dataset` **到*** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/ref-changelog.md:1149 -msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." -msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" +#: ../../source/ref-changelog.md:1187 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." 
+msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:1189 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:1153 +#: ../../source/ref-changelog.md:1191 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." -msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
+msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" -#: ../../source/ref-changelog.md:1155 +#: ../../source/ref-changelog.md:1193 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"**更新**`FedAdam`**和**`FedYogi`**战略** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**重新命名** `Weights` **到** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:1157 +#: ../../source/ref-changelog.md:1195 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." -msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." 
+msgstr "`flwr.common.Weights` 更名为 `flwr.common.NDArrays`,以更好地反映该类型的含义。" -#: ../../source/ref-changelog.md:1159 +#: ../../source/ref-changelog.md:1197 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"**初始化** `start_simulation` **使用客户端 ID 列表** " -"([#860](https://github.com/adap/flower/pull/860))" +"**从** `start_server` **中移除过时的** `force_final_distributed_eval` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:1161 +#: ../../source/ref-changelog.md:1199 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." 
msgstr "" -"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " -"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " -"`int` 标识符访问的数据分区。" +"start_server \"参数 \"force_final_distributed_eval " +"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/ref-changelog.md:1165 +#: ../../source/ref-changelog.md:1201 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -"更新 PyTorch 代码示例中的 \"num_examples \"计算 " -"([#909](https://github.com/adap/flower/pull/909))" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:1166 +#: ../../source/ref-changelog.md:1203 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." 
msgstr "" -"通过 `flwr.__version__` 公开 Flower 版本 " -"([#952](https://github.com/adap/flower/pull/952))" +"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " +"\"一样,都接受配置字典。" -#: ../../source/ref-changelog.md:1167 +#: ../../source/ref-changelog.md:1205 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"`app.py`中的 `start_server`现在会返回一个 `History` " -"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" +"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:1168 +#: ../../source/ref-changelog.md:1207 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
msgstr "" -"使 `max_workers`(由 " -"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" - -#: ../../source/ref-changelog.md:1169 -msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" +"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " +"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/ref-changelog.md:1170 +#: ../../source/ref-changelog.md:1211 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" -msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:1171 +#: ../../source/ref-changelog.md:1213 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" -msgstr "还有更多底层更改、库更新、文档更改和工具改进!" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." 
+msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" -#: ../../source/ref-changelog.md:1175 +#: ../../source/ref-changelog.md:1215 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " -"([#869](https://github.com/adap/flower/pull/869))" +"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:1177 +#: ../../source/ref-changelog.md:1217 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " -"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" - -#: ../../source/ref-changelog.md:1179 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " +"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " +"`evaluate`!" 
-#: ../../source/ref-changelog.md:1183 +#: ../../source/ref-changelog.md:1219 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:1185 +#: ../../source/ref-changelog.md:1221 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " -"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " -"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" +"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " +"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" -#: ../../source/ref-changelog.md:1187 +#: ../../source/ref-changelog.md:1223 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. 
It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -"该功能仍处于试验阶段,因此无法保证 API " -"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:1189 +#: ../../source/ref-changelog.md:1225 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" -#: ../../source/ref-changelog.md:1191 -msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +#: ../../source/ref-changelog.md:1227 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" + +#: ../../source/ref-changelog.md:1228 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" + +#: ../../source/ref-changelog.md:1229 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch`" + +#: ../../source/ref-changelog.md:1230 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:1231 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:1232 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow`" -#: ../../source/ref-changelog.md:1192 +#: ../../source/ref-changelog.md:1234 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/ref-changelog.md:1194 +#: ../../source/ref-changelog.md:1236 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -"**新的 PyTorch Lightning 代码示例** " -"([#617](https://github.com/adap/flower/pull/617))" +"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " +"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" -#: ../../source/ref-changelog.md:1196 +#: ../../source/ref-changelog.md:1238 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" -msgstr 
"**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" +msgstr "" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:1198 +#: ../../source/ref-changelog.md:1240 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" 
+msgstr "" +"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " +"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " +"`.md`,并修复了一些较小的细节!" -#: ../../source/ref-changelog.md:1200 -msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" -msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" +#: ../../source/ref-changelog.md:1242 ../../source/ref-changelog.md:1297 +#: ../../source/ref-changelog.md:1366 ../../source/ref-changelog.md:1405 +msgid "**Minor updates**" +msgstr "**小规模更新**" -#: ../../source/ref-changelog.md:1204 +#: ../../source/ref-changelog.md:1244 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" -msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" +msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/ref-changelog.md:1205 +#: ../../source/ref-changelog.md:1245 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -"当 `min_available_clients` 配置错误时发出警告 " -"([#830](https://github.com/adap/flower/pull/830))" +"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " +"([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:1206 +#: ../../source/ref-changelog.md:1246 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" -msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " 
+"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" +msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:1207 +#: ../../source/ref-changelog.md:1247 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" +"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259)" -#: ../../source/ref-changelog.md:1208 -msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" -msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" +#: ../../source/ref-changelog.md:1249 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" -#: ../../source/ref-changelog.md:1212 +#: ../../source/ref-changelog.md:1253 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" -msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" +msgstr "" +"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " 
+"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:1214 +#: ../../source/ref-changelog.md:1255 +#, fuzzy msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" -" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" +"Flower Baselines 的第一个预览版已经发布!我们通过实现 " +"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " +"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" +"/contributing-baselines.html)。" -#: ../../source/ref-changelog.md:1216 +#: ../../source/ref-changelog.md:1257 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" -msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/ref-changelog.md:1218 +#: ../../source/ref-changelog.md:1259 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better 
reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " -"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" +"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " +"演示了一个简单的 C++ 客户端。" -#: ../../source/ref-changelog.md:1220 +#: ../../source/ref-changelog.md:1261 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " +"([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:1222 +#: ../../source/ref-changelog.md:1263 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." 
msgstr "" -"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " -"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" +"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " +"Python 版本的实验支持。" -#: ../../source/ref-changelog.md:1224 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:1265 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/ref-changelog.md:1228 +#: ../../source/ref-changelog.md:1267 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." +msgstr "" +"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " +"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" -#: ../../source/ref-changelog.md:1230 -msgid "(abstract) FedOpt" -msgstr "(摘要) FedOpt" +#: ../../source/ref-changelog.md:1269 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/ref-changelog.md:1233 +#: ../../source/ref-changelog.md:1271 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" -msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. 
If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." +msgstr "" +"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " +"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" -#: ../../source/ref-changelog.md:1235 +#: ../../source/ref-changelog.md:1273 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " -"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" +"**允许在所有内置策略中同时使用联邦评价和集中评估** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:1237 +#: ../../source/ref-changelog.md:1275 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
msgstr "" -"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " -"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " -"指标字典,以便服务器跟踪。" +"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " +"`0.0`来禁用联邦评估。" -#: ../../source/ref-changelog.md:1239 +#: ../../source/ref-changelog.md:1277 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " -"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " -"`return loss, {\"accuracy\": accuracy}`。" - -#: ../../source/ref-changelog.md:1241 -msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." -msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" +"**两本新的 Jupyter Notebook 教程** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:1243 +#: ../../source/ref-changelog.md:1279 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" -msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" -#: ../../source/ref-changelog.md:1245 +#: ../../source/ref-changelog.md:1281 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. 
This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " -"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" +"*联邦学习简介*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:1247 +#: ../../source/ref-changelog.md:1283 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" - -#: ../../source/ref-changelog.md:1249 -msgid "MXNet example and documentation" -msgstr "MXNet 示例和文档" +"*在联邦学习中使用策略*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:1251 +#: ../../source/ref-changelog.md:1285 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " 
+"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " +"([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:1255 +#: ../../source/ref-changelog.md:1287 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" -msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." -#: ../../source/ref-changelog.md:1257 +#: ../../source/ref-changelog.md:1289 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." -msgstr "" -"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " -"`Parameters` 类取代(例如在 `Strategy`中)。参数 " -"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:1259 +#: ../../source/ref-changelog.md:1291 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. 
Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." -msgstr "" -"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" -" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" -#: ../../source/ref-changelog.md:1261 +#: ../../source/ref-changelog.md:1293 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -"已弃用 `flwr.server.Server.evaluate`,改用 " -"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" - -#: ../../source/ref-changelog.md:1263 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" - -#: ../../source/ref-changelog.md:1267 -msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" -msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" +"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143)" -#: ../../source/ref-changelog.md:1269 +#: ../../source/ref-changelog.md:1295 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." 
-msgstr "" -"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " -"\"方法进行。" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." +msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" -#: ../../source/ref-changelog.md:1271 +#: ../../source/ref-changelog.md:1299 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -"内置策略支持名为 \"initial_parameters " -"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" - -#: ../../source/ref-changelog.md:1290 -msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." 
-msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" +"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " +"运行([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/ref-changelog.md:1294 +#: ../../source/ref-changelog.md:1300 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " -"`flwr.server.strategy.FedAvg`)" - -#: ../../source/ref-changelog.md:1296 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " +"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:1300 +#: ../../source/ref-changelog.md:1301 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" +" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175)" #: ../../source/ref-changelog.md:1302 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. 
" -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" -msgstr "" -"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " -"返回几乎任意的值,并在服务器端使用它们!" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:1304 +#: ../../source/ref-changelog.md:1303 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " -"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" +"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/ref-changelog.md:1306 +#: ../../source/ref-changelog.md:1307 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
-msgstr "" -"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " -"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " -"`float, int, Dict[str, Scalar]`)。详见下面的示例。" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" #: ../../source/ref-changelog.md:1308 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" -msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:1323 +#: ../../source/ref-changelog.md:1309 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" -msgstr "" -"**在**`Client.fit` " -"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:1325 +#: ../../source/ref-changelog.md:1310 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." 
+"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。" +"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:1327 +#: ../../source/ref-changelog.md:1311 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " -"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" - -#: ../../source/ref-changelog.md:1329 -msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" -msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" - -#: ../../source/ref-changelog.md:1346 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" - -#: ../../source/ref-changelog.md:1350 -msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:1351 -msgid "Improved documentation" -msgstr "改进文档" - -#: ../../source/ref-changelog.md:1352 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" -msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" - -#: ../../source/ref-changelog.md:1353 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" +"**移除过时的 QffedAvg 策略(由 QFedAvg 
取代)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:1354 +#: ../../source/ref-changelog.md:1312 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**删除过时的 DefaultStrategy 策略** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:1355 +#: ../../source/ref-changelog.md:1313 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" -msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" - -#: ../../source/ref-changelog.md:1357 -msgid "Bugfix:" -msgstr "错误修正:" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**删除已过时的对 eval_fn 返回值准确性的支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:1359 +#: ../../source/ref-changelog.md:1314 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
+"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " -"\"中处理的([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))。" - -#: ../../source/ref-changelog.md:1361 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" +"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 -msgid "Important changes:" -msgstr "重要变更:" +#: ../../source/ref-changelog.md:1316 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/ref-changelog.md:1365 +#: ../../source/ref-changelog.md:1320 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" -msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" +"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:1366 +#: ../../source/ref-changelog.md:1322 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. 
Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -"添加了一个新的 NumPyClient(除现有的 KerasClient " -"之外)([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508)" +"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " +"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation`" +" 使用虚拟客户端引擎)现在可以更流畅地运行。" -#: ../../source/ref-changelog.md:1367 +#: ../../source/ref-changelog.md:1324 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" - -#: ../../source/ref-changelog.md:1369 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" - -#: ../../source/ref-changelog.md:1371 -msgid "Incompatible changes:" -msgstr "不兼容的更改:" +"**新的 Jupyter Notebook 代码示例** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:1373 +#: ../../source/ref-changelog.md:1326 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." 
msgstr "" -"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " -"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " -"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" - -#: ../../source/ref-changelog.md:1374 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" +"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " +"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" -#: ../../source/ref-changelog.md:1375 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:1328 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-changelog.md:1376 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:1330 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." +msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" -#: ../../source/ref-changelog.md:1377 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:1332 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:1381 +#: ../../source/ref-changelog.md:1334 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. 
One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " -"。迁移时请使用 `FedAvg`。" +"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " +"Flower 就变得更容易了。" -#: ../../source/ref-changelog.md:1382 +#: ../../source/ref-changelog.md:1336 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." -msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." +msgstr "" +"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " +"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " +"`FedAvg`实现迈出的第一步。" -#: ../../source/ref-changelog.md:1383 +#: ../../source/ref-changelog.md:1338 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -"删除了策略界面中目前未使用的 " -"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" +"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/ref-changelog.md:1384 +#: ../../source/ref-changelog.md:1340 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). 
Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " +"keepalive 时间,自定义 gRPC 堆栈。" -#: ../../source/ref-changelog.md:1385 +#: ../../source/ref-changelog.md:1342 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -"改进了 `Strategy` " -"docstrings([#470](https://github.com/adap/flower/pull/470))。" +"**使用 Opacus 和 PyTorch 的新差分隐私示例** " +"([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" -msgstr "项目实例" +#: ../../source/ref-changelog.md:1344 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." +msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:1346 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " -"`PyTorch `_ 或 `TensorFlow " -"`_。" - -#: ../../source/ref-example-projects.rst:9 -#, fuzzy -msgid "The following examples are available as standalone projects." 
-msgstr "以下示例可作为独立项目使用。" - -#: ../../source/ref-example-projects.rst:12 -#, fuzzy -msgid "Quickstart TensorFlow/Keras" -msgstr "快速入门 TensorFlow" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:1348 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" -msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." +msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:1350 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -"`TensorFlow快速入门 (代码) `_" +"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-example-projects.rst:19 -#, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" -msgstr "" -"`TensorFlow快速入门 (教程) `_" +#: ../../source/ref-changelog.md:1352 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." 
+msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" -#: ../../source/ref-example-projects.rst:20 +#: ../../source/ref-changelog.md:1354 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -"`TensorFlow快速入门 (博客) `_" - -#: ../../source/ref-example-projects.rst:24 -#: ../../source/tutorial-quickstart-pytorch.rst:4 -msgid "Quickstart PyTorch" -msgstr "PyTorch快速入门" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-changelog.md:1356 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" -msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." 
+msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:1358 msgid "" -"`Quickstart PyTorch (Code) " -"`_" -msgstr "" -"`PyTorch快速入门 (代码) `_" - -#: ../../source/ref-example-projects.rst:31 -#, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -"`PyTorch快速入门 (教程) `_" - -#: ../../source/ref-example-projects.rst:34 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch: 从集中式到联邦式" +"**更新**`FedAdam`**和**`FedYogi`**战略** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-changelog.md:1360 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" -msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." +msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:1362 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -"PyTorch: 从集中式到联邦式(代码) `_" +"**初始化** `start_simulation` **使用客户端 ID 列表** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-example-projects.rst:40 -#, fuzzy +#: ../../source/ref-changelog.md:1364 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). 
Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -"PyTorch: 从集中式到联邦式(教程) `_" +"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " +"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " +"`int` 标识符访问的数据分区。" -#: ../../source/ref-example-projects.rst:44 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "树莓派和 Nvidia Jetson 上的联邦学习" +#: ../../source/ref-changelog.md:1368 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" +"更新 PyTorch 代码示例中的 \"num_examples \"计算 " +"([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:1369 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" -msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" +"通过 `flwr.__version__` 公开 Flower 版本 " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-example-projects.rst:49 +#: ../../source/ref-changelog.md:1370 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " -"`_" +"`app.py`中的 `start_server`现在会返回一个 `History` " +"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-example-projects.rst:51 +#: ../../source/ref-changelog.md:1371 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"Make `max_workers` (used by 
`ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " -"`_" +"使 `max_workers`(由 " +"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:1372 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/ref-faq.rst -#, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" +#: ../../source/ref-changelog.md:1373 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-faq.rst:9 +#: ../../source/ref-changelog.md:1374 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" +msgstr "还有更多底层更改、库更新、文档更改和工具改进!" 
-#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:1378 msgid "" -"`Flower simulation PyTorch " -"`_" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"`Flower 模拟 PyTorch " -"`_" +"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-faq.rst:12 +#: ../../source/ref-changelog.md:1380 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -"`Flower模拟TensorFlow/Keras " -"`_" +"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " +"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" -msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" +#: ../../source/ref-changelog.md:1382 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/ref-faq.rst:16 +#: ../../source/ref-changelog.md:1386 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." 
+"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -"请点击此处查看有关嵌入式设备联邦学习的 " -"\"博文\"`_和相应的" -" \"GitHub 代码示例\"`_。" +"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" -msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" +#: ../../source/ref-changelog.md:1388 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." +msgstr "" +"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " +"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " +"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" -#: ../../source/ref-faq.rst:20 +#: ../../source/ref-changelog.md:1390 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
msgstr "" -"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" +"该功能仍处于试验阶段,因此无法保证 API " +"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-changelog.md:1392 msgid "" -"`Android Kotlin example `_" -msgstr "`Android Kotlin 示例 `_" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" +msgstr "" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/ref-faq.rst:23 -msgid "`Android Java example `_" -msgstr "Android Java 示例 `_" +#: ../../source/ref-changelog.md:1394 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" -msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" +#: ../../source/ref-changelog.md:1395 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/ref-faq.rst:27 +#: ../../source/ref-changelog.md:1397 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" -msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" - -#: ../../source/ref-faq.rst:30 -msgid "`FLock: A Decentralised AI Training Platform `_." -msgstr "" - -#: ../../source/ref-faq.rst:30 -msgid "Contribute to on-chain training the model and earn rewards." 
+"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" +"**新的 PyTorch Lightning 代码示例** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/ref-faq.rst:31 -#, fuzzy -msgid "Local blockchain with federated learning simulation." -msgstr "扩大联邦学习的规模" - -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:1399 msgid "" -"`Flower meets Nevermined GitHub Repository `_." -msgstr "" -"`Flower meets Nevermined GitHub Repository `_." +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-changelog.md:1401 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." -msgstr "" -"`Flower meets Nevermined YouTube 视频 " -"`_." +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-faq.rst:34 -#, fuzzy +#: ../../source/ref-changelog.md:1403 msgid "" -"`Flower meets KOSMoS `_." -msgstr "" -"`Flower meets KOSMoS `_." +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-changelog.md:1407 msgid "" -"`Flower meets Talan blog post `_ ." -msgstr "" -"`Flower meets Talan博文 `_ 。" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" -#: ../../source/ref-faq.rst:36 +#: ../../source/ref-changelog.md:1408 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." 
+"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -"`Flower meets Talan GitHub Repository " -"`_ ." - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "遥测功能" +"当 `min_available_clients` 配置错误时发出警告 " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:1409 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." -msgstr "" -"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:1410 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." -msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "原则" - -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" -msgstr "我们遵循严格的匿名使用指标收集原则:" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:1411 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." 
-msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" +msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:1415 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." -msgstr "" -"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " -"\"了解报告的指标。" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:1417 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" -"being-reported)\"部分" +"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" +" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:1419 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." 
-msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "如何退出" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:1421 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " -"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " -"服务器或客户端,只需在命令前添加以下内容即可:" +"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " +"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:1423 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." 
+"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " -"Flower telemetry。" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "收集的指标" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "Flower 遥测技术收集以下指标:" +"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:1425 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." -msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." 
+msgstr "" +"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " +"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" -#: ../../source/ref-telemetry.md:32 -msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" -msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" +#: ../../source/ref-changelog.md:1427 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:1431 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." -msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-telemetry.md:36 -msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." -msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" +#: ../../source/ref-changelog.md:1433 +msgid "(abstract) FedOpt" +msgstr "(摘要) FedOpt" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:1436 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." 
-msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:1438 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " -"工作负载,而且还成功完成了它们。" +"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " +"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:1440 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." 
+"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " -"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" +"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " +"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " +"指标字典,以便服务器跟踪。" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:1442 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" -" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" +"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " +"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " +"`return loss, {\"accuracy\": accuracy}`。" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:1444 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). 
We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." -msgstr "" -"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" -"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." +msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:1446 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." -msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "如何检查报告中的内容" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:1448 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." 
msgstr "" -"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " -"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " -"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" +"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " +"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:1450 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" +"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "如何联系我们" +#: ../../source/ref-changelog.md:1452 +msgid "MXNet example and documentation" +msgstr "MXNet 示例和文档" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:1454 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." 
+"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" -"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" +"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:1458 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/tutorial-quickstart-android.rst:4 -msgid "Quickstart Android" -msgstr "快速入门 Android" +#: ../../source/ref-changelog.md:1460 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." +msgstr "" +"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " +"`Parameters` 类取代(例如在 `Strategy`中)。参数 " +"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" -#: ../../source/tutorial-quickstart-android.rst:9 +#: ../../source/ref-changelog.md:1462 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" 
-msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." +msgstr "" +"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" +" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" -#: ../../source/tutorial-quickstart-android.rst:11 +#: ../../source/ref-changelog.md:1464 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" +"已弃用 `flwr.server.Server.evaluate`,改用 " +"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" -#: ../../source/tutorial-quickstart-fastai.rst:4 -msgid "Quickstart fastai" -msgstr "快速入门 fastai" +#: ../../source/ref-changelog.md:1466 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" -#: ../../source/tutorial-quickstart-fastai.rst:6 -#, fuzzy +#: ../../source/ref-changelog.md:1470 msgid "" -"In this federated learning tutorial we will learn how to train a " -"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `." 
-msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/tutorial-quickstart-fastai.rst:10 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 -msgid "Then, clone the code example directly from GitHub:" +#: ../../source/ref-changelog.md:1472 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" +"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " +"\"方法进行。" -#: ../../source/tutorial-quickstart-fastai.rst:18 +#: ../../source/ref-changelog.md:1474 msgid "" -"This will create a new directory called `quickstart-fastai` containing " -"the following files:" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" +"内置策略支持名为 \"initial_parameters " +"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" -#: ../../source/tutorial-quickstart-fastai.rst:31 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 -#, fuzzy -msgid "Next, activate your environment, then run:" -msgstr "并激活虚拟环境:" +#: ../../source/ref-changelog.md:1493 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
+msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" -#: ../../source/tutorial-quickstart-fastai.rst:41 +#: ../../source/ref-changelog.md:1497 msgid "" -"This example by default runs the Flower Simulation Engine, creating a " -"federation of 10 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." -" Let's run the project:" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" +"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " +"`flwr.server.strategy.FedAvg`)" -#: ../../source/tutorial-quickstart-fastai.rst:54 -#: ../../source/tutorial-quickstart-huggingface.rst:61 -#: ../../source/tutorial-quickstart-mlx.rst:60 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 -#: ../../source/tutorial-quickstart-pytorch.rst:62 -#: ../../source/tutorial-quickstart-tensorflow.rst:62 -msgid "With default arguments you will see an output like this one:" -msgstr "" +#: ../../source/ref-changelog.md:1499 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/tutorial-quickstart-fastai.rst:98 -#: ../../source/tutorial-quickstart-huggingface.rst:112 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 -#: ../../source/tutorial-quickstart-pytorch.rst:103 -#: ../../source/tutorial-quickstart-tensorflow.rst:103 +#: ../../source/ref-changelog.md:1503 msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" +"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " +"([#610](https://github.com/adap/flower/pull/610) " 
+"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/tutorial-quickstart-fastai.rst:108 -#, fuzzy +#: ../../source/ref-changelog.md:1505 msgid "" -"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " -"in the Flower GitHub repository." +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" - -#: ../../source/tutorial-quickstart-huggingface.rst:-1 -#, fuzzy -msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." -msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" - -#: ../../source/tutorial-quickstart-huggingface.rst:4 -msgid "Quickstart 🤗 Transformers" -msgstr "🤗 Transformers快速入门" +"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " +"返回几乎任意的值,并在服务器端使用它们!" -#: ../../source/tutorial-quickstart-huggingface.rst:6 -#, fuzzy +#: ../../source/ref-changelog.md:1507 msgid "" -"In this federated learning tutorial we will learn how to train a large " -"language model (LLM) on the `IMDB " -"`_ dataset using Flower" -" and the 🤗 Hugging Face Transformers library. It is recommended to create" -" a virtual environment and run everything within a :doc:`virtualenv " -"`." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." 
msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " +"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:1509 msgid "" -"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " -"project. It will generate all the files needed to run, by default with " -"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " -"The dataset will be partitioned using |flowerdatasets|_'s " -"|iidpartitioner|_." +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" +"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " +"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " +"`float, int, Dict[str, Scalar]`)。详见下面的示例。" -#: ../../source/tutorial-quickstart-huggingface.rst:17 -#: ../../source/tutorial-quickstart-mlx.rst:17 -#: ../../source/tutorial-quickstart-pytorch.rst:18 -#: ../../source/tutorial-quickstart-tensorflow.rst:18 -#, fuzzy +#: ../../source/ref-changelog.md:1511 msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" -#: ../../source/tutorial-quickstart-huggingface.rst:25 +#: ../../source/ref-changelog.md:1526 msgid "" -"Then, run the command below. 
You will be prompted to select one of the " -"available templates (choose ``HuggingFace``), give a name to your " -"project, and type in your developer name:" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" +"**在**`Client.fit` " +"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -#: ../../source/tutorial-quickstart-mlx.rst:32 -#: ../../source/tutorial-quickstart-pytorch.rst:34 -#: ../../source/tutorial-quickstart-tensorflow.rst:34 +#: ../../source/ref-changelog.md:1528 msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. It should have the following structure:" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" +"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。" -#: ../../source/tutorial-quickstart-huggingface.rst:47 -#: ../../source/tutorial-quickstart-mlx.rst:46 -#: ../../source/tutorial-quickstart-pytorch.rst:48 -#: ../../source/tutorial-quickstart-tensorflow.rst:48 +#: ../../source/ref-changelog.md:1530 msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" +"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " +"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" 
-#: ../../source/tutorial-quickstart-huggingface.rst:54 -#: ../../source/tutorial-quickstart-pytorch.rst:55 -#: ../../source/tutorial-quickstart-tensorflow.rst:55 -msgid "To run the project, do:" -msgstr "" +#: ../../source/ref-changelog.md:1532 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" -#: ../../source/tutorial-quickstart-huggingface.rst:102 -msgid "You can also run the project with GPU as follows:" -msgstr "" +#: ../../source/ref-changelog.md:1549 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" -#: ../../source/tutorial-quickstart-huggingface.rst:109 +#: ../../source/ref-changelog.md:1553 msgid "" -"This will use the default arguments where each ``ClientApp`` will use 2 " -"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." -msgstr "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-quickstart-huggingface.rst:120 -#: ../../source/tutorial-quickstart-mlx.rst:110 -#: ../../source/tutorial-quickstart-pytorch.rst:111 +#: ../../source/ref-changelog.md:1554 +msgid "Improved documentation" +msgstr "改进文档" + +#: ../../source/ref-changelog.md:1555 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" + +#: ../../source/ref-changelog.md:1556 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:1557 msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." 
-msgstr "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:115 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -#, fuzzy -msgid "The Data" -msgstr "加载数据" +#: ../../source/ref-changelog.md:1558 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/tutorial-quickstart-huggingface.rst:126 +#: ../../source/ref-changelog.md:1560 +msgid "Bugfix:" +msgstr "错误修正:" + +#: ../../source/ref-changelog.md:1562 msgid "" -"This tutorial uses |flowerdatasets|_ to easily download and partition the" -" `IMDB `_ dataset. In " -"this example you'll make use of the |iidpartitioner|_ to generate " -"``num_partitions`` partitions. You can choose |otherpartitioners|_ " -"available in Flower Datasets. To tokenize the text, we will also load the" -" tokenizer from the pre-trained Transformer model that we'll use during " -"training - more on that in the next section. Each ``ClientApp`` will call" -" this function to create dataloaders with the data that correspond to " -"their data partition." +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" +"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " +"\"中处理的([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))。" -#: ../../source/tutorial-quickstart-huggingface.rst:171 -#: ../../source/tutorial-quickstart-mlx.rst:155 -#: ../../source/tutorial-quickstart-pytorch.rst:150 -#: ../../source/tutorial-quickstart-tensorflow.rst:139 -#, fuzzy -msgid "The Model" -msgstr "训练模型" +#: ../../source/ref-changelog.md:1564 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/tutorial-quickstart-huggingface.rst:173 -#, fuzzy +#: ../../source/ref-changelog.md:1566 ../../source/ref-changelog.md:1582 +msgid "Important changes:" +msgstr "重要变更:" + +#: ../../source/ref-changelog.md:1568 msgid "" -"We will leverage 🤗 Hugging Face to federate the training of language " -"models over multiple clients using Flower. More specifically, we will " -"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " -"classification over the dataset of IMDB ratings. The end goal is to " -"detect if a movie rating is positive or negative. If you have access to " -"larger GPUs, feel free to use larger models!" -msgstr "" -"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " -"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/tutorial-quickstart-huggingface.rst:185 +#: ../../source/ref-changelog.md:1569 msgid "" -"Note that here, ``model_name`` is a string that will be loaded from the " -"``Context`` in the ClientApp and ServerApp." 
+"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" +"添加了一个新的 NumPyClient(除现有的 KerasClient " +"之外)([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508)" -#: ../../source/tutorial-quickstart-huggingface.rst:188 +#: ../../source/ref-changelog.md:1570 msgid "" -"In addition to loading the pretrained model weights and architecture, we " -"also include two utility functions to perform both training (i.e. " -"``train()``) and evaluation (i.e. ``test()``) using the above model. " -"These functions should look fairly familiar if you have some prior " -"experience with PyTorch. Note these functions do not have anything " -"specific to Flower. That being said, the training function will normally " -"be called, as we'll see later, from a Flower client passing its own data." -" In summary, your clients can use standard training/testing functions to " -"perform local training or evaluation:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" +"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/tutorial-quickstart-huggingface.rst:228 -#: ../../source/tutorial-quickstart-mlx.rst:199 -#: ../../source/tutorial-quickstart-pytorch.rst:224 -#: ../../source/tutorial-quickstart-tensorflow.rst:168 -#, fuzzy -msgid "The ClientApp" -msgstr "客户端" +#: ../../source/ref-changelog.md:1572 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/ref-changelog.md:1574 +msgid "Incompatible changes:" +msgstr "不兼容的更改:" + +#: 
../../source/ref-changelog.md:1576 msgid "" -"The main changes we have to make to use 🤗 Hugging Face with Flower will " -"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " -"the hood, the ``transformers`` library uses PyTorch, which means we can " -"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" -" the :doc:`Quickstart PyTorch ` tutorial. As" -" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" -" and represented as a list of NumPy arrays. The ``set_weights()`` " -"function that's the opposite: given a list of NumPy arrays it applies " -"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" +"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " +"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " +"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" -#: ../../source/tutorial-quickstart-huggingface.rst:241 -#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/ref-changelog.md:1577 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" + +#: ../../source/ref-changelog.md:1578 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" + +#: ../../source/ref-changelog.md:1579 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:1580 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" + +#: ../../source/ref-changelog.md:1584 msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" +"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " +"。迁移时请使用 `FedAvg`。" -#: ../../source/tutorial-quickstart-huggingface.rst:257 -#: ../../source/tutorial-quickstart-pytorch.rst:250 +#: ../../source/ref-changelog.md:1585 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. 
Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" -msgstr "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" -#: ../../source/tutorial-quickstart-huggingface.rst:283 +#: ../../source/ref-changelog.md:1586 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. You could define " -"additional hyperparameters in ``pyproject.toml`` and access them here." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" +"删除了策略界面中目前未使用的 " +"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" -#: ../../source/tutorial-quickstart-huggingface.rst:316 -#: ../../source/tutorial-quickstart-mlx.rst:361 -#: ../../source/tutorial-quickstart-pytorch.rst:307 -#: ../../source/tutorial-quickstart-tensorflow.rst:232 -#, fuzzy -msgid "The ServerApp" -msgstr "服务器" - -#: ../../source/tutorial-quickstart-huggingface.rst:318 +#: ../../source/ref-changelog.md:1587 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"|serverappcomponents|_ as opposed to a |client|_ In this example we use " -"the `FedAvg` strategy. To it we pass a randomly initialized model that " -"will server as the global model to federated. Note that the value of " -"``fraction_fit`` is read from the run config. 
You can find the default " -"value defined in the ``pyproject.toml``." +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" +"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/tutorial-quickstart-huggingface.rst:356 +#: ../../source/ref-changelog.md:1588 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system for an LLM." +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" +"改进了 `Strategy` " +"docstrings([#470](https://github.com/adap/flower/pull/470))。" -#: ../../source/tutorial-quickstart-huggingface.rst:361 +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "项目实例" + +#: ../../source/ref-example-projects.rst:4 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_hf_link|_ in the Flower GitHub repository. For a " -"comprehensive example of a federated fine-tuning of an LLM with Flower, " -"refer to the |flowertune|_ example in the Flower GitHub repository." +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" +"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " +"`PyTorch `_ 或 `TensorFlow " +"`_。" -#: ../../source/tutorial-quickstart-ios.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" +#: ../../source/ref-example-projects.rst:9 +#, fuzzy +msgid "The following examples are available as standalone projects." 
+msgstr "以下示例可作为独立项目使用。" -#: ../../source/tutorial-quickstart-ios.rst:4 -msgid "Quickstart iOS" -msgstr "快速入门 iOS" +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "快速入门 TensorFlow" -#: ../../source/tutorial-quickstart-ios.rst:9 +#: ../../source/ref-example-projects.rst:14 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." -msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" -#: ../../source/tutorial-quickstart-ios.rst:12 -#, fuzzy +#: ../../source/ref-example-projects.rst:17 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " -"`_ 中运行一切。对于在 iOS 中实现 " -"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" - -#: ../../source/tutorial-quickstart-ios.rst:17 -msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." -msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" +"`TensorFlow快速入门 (代码) `_" -#: ../../source/tutorial-quickstart-ios.rst:20 -msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
-msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" +#: ../../source/ref-example-projects.rst:19 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/tutorial-quickstart-ios.rst:26 +#: ../../source/ref-example-projects.rst:20 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" -msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" +"`TensorFlow快速入门 (博客) `_" -#: ../../source/tutorial-quickstart-ios.rst:33 -msgid "Or Poetry:" -msgstr "或者Poetry:" +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "PyTorch快速入门" -#: ../../source/tutorial-quickstart-ios.rst:40 -#: ../../source/tutorial-quickstart-scikitlearn.rst:43 -#: ../../source/tutorial-quickstart-xgboost.rst:65 -msgid "Flower Client" -msgstr "Flower 客户端" +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" -#: ../../source/tutorial-quickstart-ios.rst:42 +#: ../../source/ref-example-projects.rst:29 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. 
The client implementation can be seen below:" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " -"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " -"中实现并存储。客户端实现如下:" +"`PyTorch快速入门 (代码) `_" -#: ../../source/tutorial-quickstart-ios.rst:80 +#: ../../source/ref-example-projects.rst:31 #, fuzzy -msgid "" -"Let's create a new application project in Xcode and add ``flwr`` as a " -"dependency in your project. For our application, we will store the logic " -"of our app in ``FLiOSModel.swift`` and the UI elements in " -"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" -" quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " -":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " -"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " -"`_ 以了解更多有关应用程序的信息。" +"`PyTorch快速入门 (教程) `_" -#: ../../source/tutorial-quickstart-ios.rst:86 -#, fuzzy -msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" -msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch: 从集中式到联邦式" -#: ../../source/tutorial-quickstart-ios.rst:94 -#, fuzzy +#: ../../source/ref-example-projects.rst:36 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" -" ``MLBatchProvider`` object. The preprocessing is done inside " -"``DataLoader.swift``." 
-msgstr "" -"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " -"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " -"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " -"中完成。" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" -#: ../../source/tutorial-quickstart-ios.rst:112 -#, fuzzy +#: ../../source/ref-example-projects.rst:38 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -"``MLModelInspect``." +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " -"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" +"PyTorch: 从集中式到联邦式(代码) `_" -#: ../../source/tutorial-quickstart-ios.rst:118 +#: ../../source/ref-example-projects.rst:40 #, fuzzy msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." -msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +msgstr "" +"PyTorch: 从集中式到联邦式(教程) `_" -#: ../../source/tutorial-quickstart-ios.rst:133 -#, fuzzy +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "树莓派和 Nvidia Jetson 上的联邦学习" + +#: ../../source/ref-example-projects.rst:46 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function ``startFlwrGRPC``." 
-msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" -#: ../../source/tutorial-quickstart-ios.rst:141 -#, fuzzy +#: ../../source/ref-example-projects.rst:49 msgid "" -"That's it for the client. We only have to implement ``Client`` or call " -"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" -" ``hostname`` and ``port`` tells the client which server to connect to. " -"This can be done by entering the hostname and port in the application " -"before clicking the start button to start the federated learning process." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " -":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " -"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" - -#: ../../source/tutorial-quickstart-ios.rst:148 -#: ../../source/tutorial-quickstart-scikitlearn.rst:179 -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "Flower Server" -msgstr "Flower 服务器" +"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " +"`_" -#: ../../source/tutorial-quickstart-ios.rst:150 -#, fuzzy +#: ../../source/ref-example-projects.rst:51 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -"``server.py``, import Flower and start the server:" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " -"Flower 并启动服务器:" +"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " +"`_" -#: ../../source/tutorial-quickstart-ios.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:254 -msgid "Train the model, federated!" 
-msgstr "联邦训练模型!" +#: ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "常见问题" -#: ../../source/tutorial-quickstart-ios.rst:163 -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-faq.rst:4 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" -msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" -#: ../../source/tutorial-quickstart-ios.rst:171 -msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." -msgstr "" -"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " -"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " -"`_。" - -#: ../../source/tutorial-quickstart-ios.rst:177 +#: ../../source/ref-faq.rst #, fuzzy -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in ``examples/ios``." -msgstr "" -"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可在 " -":code:`examples/ios` 中找到。" +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-faq.rst:9 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" - -#: ../../source/tutorial-quickstart-jax.rst:4 -msgid "Quickstart JAX" -msgstr "快速入门 JAX" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" -#: ../../source/tutorial-quickstart-jax.rst:9 +#: ../../source/ref-faq.rst:11 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"`Flower simulation PyTorch " +"`_" msgstr "" -"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " -"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " -"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " -"`_" -" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" +"`Flower 模拟 PyTorch " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:20 -#, fuzzy +#: ../../source/ref-faq.rst:12 msgid "" -"Before we start building our JAX example, we need install the packages " -"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " -"和 :code:`flwr`:" - -#: ../../source/tutorial-quickstart-jax.rst:28 -msgid "Linear Regression with JAX" -msgstr "使用 JAX 进行线性回归" +"`Flower模拟TensorFlow/Keras " +"`_" -#: ../../source/tutorial-quickstart-jax.rst:30 -#, fuzzy -msgid "" -"We begin with a brief description of the centralized training code based " -"on a ``Linear Regression`` model. 
If you want a more in-depth explanation" -" of what's going on then have a look at the official `JAX documentation " -"`_." -msgstr "" -"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " -"`JAX 文档 `_。" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" -#: ../../source/tutorial-quickstart-jax.rst:34 -#, fuzzy +#: ../../source/ref-faq.rst:16 msgid "" -"Let's create a new file called ``jax_training.py`` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " -"imported. In addition, we need to import ``sklearn`` since we use " -"``make_regression`` for the dataset and ``train_test_split`` to split the" -" dataset into a training and test set. You can see that we do not yet " -"import the ``flwr`` package for federated learning. This will be done " -"later." +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " -"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " -":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " -"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" - -#: ../../source/tutorial-quickstart-jax.rst:51 -#, fuzzy -msgid "The ``load_data()`` function loads the mentioned training and test sets." -msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" +"请点击此处查看有关嵌入式设备联邦学习的 " +"\"博文\"`_和相应的" +" \"GitHub 代码示例\"`_。" -#: ../../source/tutorial-quickstart-jax.rst:63 -#, fuzzy -msgid "" -"The model architecture (a very simple ``Linear Regression`` model) is " -"defined in ``load_model()``." 
-msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" -#: ../../source/tutorial-quickstart-jax.rst:73 -#, fuzzy +#: ../../source/ref-faq.rst:20 msgid "" -"We now need to define the training (function ``train()``), which loops " -"over the training set and measures the loss (function ``loss_fn()``) for " -"each batch of training examples. The loss function is separate since JAX " -"takes derivatives with a ``grad()`` function (defined in the ``main()`` " -"function and called in ``train()``)." +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " -":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " -"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" +"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" -#: ../../source/tutorial-quickstart-jax.rst:95 -#, fuzzy +#: ../../source/ref-faq.rst:22 msgid "" -"The evaluation of the model is defined in the function ``evaluation()``. " -"The function takes all test examples and measures the loss of the linear " -"regression model." -msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" +"`Android Kotlin example `_" +msgstr "`Android Kotlin 示例 `_" -#: ../../source/tutorial-quickstart-jax.rst:107 -#, fuzzy -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the ``jax.grad()`` function is defined in " -"``main()`` and passed to ``train()``." 
-msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " -"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " -":code:`train()`。" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "Android Java 示例 `_" -#: ../../source/tutorial-quickstart-jax.rst:126 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" -#: ../../source/tutorial-quickstart-jax.rst:132 +#: ../../source/ref-faq.rst:27 msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." -msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" -#: ../../source/tutorial-quickstart-jax.rst:137 -msgid "JAX meets Flower" -msgstr "JAX 结合 Flower" +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:139 -#, fuzzy -msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -"``jax_training.py`` for the *clients* that are connected to the *server*." -" The *server* sends model parameters to the clients. The *clients* run " -"the training and update the parameters. The updated parameters are sent " -"back to the *server*, which averages all received parameter updates. This" -" describes one round of the federated learning process, and we repeat " -"this for multiple rounds." 
+#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " -":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" -#: ../../source/tutorial-quickstart-jax.rst:167 +#: ../../source/ref-faq.rst:31 #, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "扩大联邦学习的规模" + +#: ../../source/ref-faq.rst:32 msgid "" -"Finally, we will define our *client* logic in ``client.py`` and build " -"upon the previously defined JAX training in ``jax_training.py``. Our " -"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " -"update the parameters on our JAX model:" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " -":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " -":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" +"`Flower meets Nevermined GitHub Repository `_." -#: ../../source/tutorial-quickstart-jax.rst:182 -#, fuzzy +#: ../../source/ref-faq.rst:33 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " -"implementation will be based on ``flwr.client.NumPyClient`` and we'll " -"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" -" than ``Client`` if you use a framework with good NumPy interoperability " -"(like JAX) because it avoids some of the boilerplate that would otherwise" -" be necessary. ``FlowerClient`` needs to implement four methods, two " -"methods for getting/setting model parameters, one method for training the" -" model, and one method for testing the model:" +"`Flower meets Nevermined YouTube video " +"`_." 
msgstr "" -"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " -"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " -":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" - -#: ../../source/tutorial-quickstart-jax.rst:194 -#, fuzzy -msgid "``set_parameters (optional)``" -msgstr ":code:`set_parameters (可选)`" - -#: ../../source/tutorial-quickstart-jax.rst:193 -#, fuzzy -msgid "transform parameters to NumPy ``ndarray``'s" -msgstr "将参数转换为 NumPy :code:`ndarray`格式" - -#: ../../source/tutorial-quickstart-jax.rst:203 -msgid "get the updated local model parameters and return them to the server" -msgstr "获取更新后的本地模型参数并返回服务器" - -#: ../../source/tutorial-quickstart-jax.rst:208 -msgid "return the local loss to the server" -msgstr "向服务器返回本地损失值" +"`Flower meets Nevermined YouTube 视频 " +"`_." -#: ../../source/tutorial-quickstart-jax.rst:210 +#: ../../source/ref-faq.rst:34 #, fuzzy msgid "" -"The challenging part is to transform the JAX model parameters from " -"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " -"`NumPyClient`." +"`Flower meets KOSMoS `_." msgstr "" -"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " -"`NumPyClient` 兼容。" +"`Flower meets KOSMoS `_." -#: ../../source/tutorial-quickstart-jax.rst:213 -#, fuzzy +#: ../../source/ref-faq.rst:35 msgid "" -"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " -"functions ``train()`` and ``evaluate()`` previously defined in " -"``jax_training.py``. So what we really do here is we tell Flower through " -"our ``NumPyClient`` subclass which of our already defined functions to " -"call for training and evaluation. We included type annotations to give " -"you a better understanding of the data types that get passed around." +"`Flower meets Talan blog post `_ ." 
msgstr "" -"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " -":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" - -#: ../../source/tutorial-quickstart-jax.rst:286 -msgid "Having defined the federation process, we can run it." -msgstr "定义了联邦进程后,我们就可以运行它了。" +"`Flower meets Talan博文 `_ 。" -#: ../../source/tutorial-quickstart-jax.rst:315 +#: ../../source/ref-faq.rst:36 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" -msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" +"`Flower meets Talan GitHub Repository " +"`_ ." + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "遥测功能" -#: ../../source/tutorial-quickstart-jax.rst:321 +#: ../../source/ref-telemetry.md:3 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " -"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" +"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/tutorial-quickstart-jax.rst:325 +#: ../../source/ref-telemetry.md:5 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" -msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" 
+"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" -#: ../../source/tutorial-quickstart-mlx.rst:4 -#, fuzzy -msgid "Quickstart MLX" -msgstr "快速入门 JAX" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "原则" -#: ../../source/tutorial-quickstart-mlx.rst:6 -#, fuzzy -msgid "" -"In this federated learning tutorial we will learn how to train simple MLP" -" on MNIST using Flower and MLX. It is recommended to create a virtual " -"environment and run everything within a :doc:`virtualenv `." -msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "我们遵循严格的匿名使用指标收集原则:" -#: ../../source/tutorial-quickstart-mlx.rst:10 +#: ../../source/ref-telemetry.md:11 msgid "" -"Let's use `flwr new` to create a complete Flower+MLX project. It will " -"generate all the files needed to run, by default with the Simulation " -"Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." -msgstr "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" -#: ../../source/tutorial-quickstart-mlx.rst:25 +#: ../../source/ref-telemetry.md:12 msgid "" -"Then, run the command below. 
You will be prompted to select of the " -"available templates (choose ``MLX``), give a name to your project, and " -"type in your developer name:" -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:53 -msgid "To run the project do:" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" +"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " +"\"了解报告的指标。" -#: ../../source/tutorial-quickstart-mlx.rst:102 +#: ../../source/ref-telemetry.md:13 msgid "" -"You can also override the parameters defined in " -"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" +"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" +"being-reported)\"部分" -#: ../../source/tutorial-quickstart-mlx.rst:116 +#: ../../source/ref-telemetry.md:14 msgid "" -"We will use `Flower Datasets `_ to " -"easily download and partition the `MNIST` dataset. In this example you'll" -" make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets:" -msgstr "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." 
+msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" -#: ../../source/tutorial-quickstart-mlx.rst:157 +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "如何退出" + +#: ../../source/ref-telemetry.md:18 msgid "" -"We define the model as in the `centralized MLX example " -"`_, it's a " -"simple MLP:" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" +"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " +"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " +"服务器或客户端,只需在命令前添加以下内容即可:" -#: ../../source/tutorial-quickstart-mlx.rst:180 +#: ../../source/ref-telemetry.md:24 msgid "" -"We also define some utility functions to test our model and to iterate " -"over batches." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" +"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " +"Flower telemetry。" -#: ../../source/tutorial-quickstart-mlx.rst:201 +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "收集的指标" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "Flower 遥测技术收集以下指标:" + +#: ../../source/ref-telemetry.md:30 msgid "" -"The main changes we have to make to use `MLX` with `Flower` will be found" -" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " -"doesn't provide an easy way to convert the model parameters into a list " -"of ``np.array`` objects (the format we need for the serialization of the " -"messages to work)." -msgstr "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. 
This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" -#: ../../source/tutorial-quickstart-mlx.rst:206 -msgid "The way MLX stores its parameters is as follows:" -msgstr "" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" -#: ../../source/tutorial-quickstart-mlx.rst:219 +#: ../../source/ref-telemetry.md:34 msgid "" -"Therefore, to get our list of ``np.array`` objects, we need to extract " -"each array and convert them into a NumPy array:" -msgstr "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" -#: ../../source/tutorial-quickstart-mlx.rst:228 +#: ../../source/ref-telemetry.md:36 msgid "" -"For the ``set_params()`` function, we perform the reverse operation. We " -"receive a list of NumPy arrays and want to convert them into MLX " -"parameters. Therefore, we iterate through pairs of parameters and assign " -"them to the `weight` and `bias` keys of each layer dict:" -msgstr "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." 
+msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" -#: ../../source/tutorial-quickstart-mlx.rst:243 +#: ../../source/ref-telemetry.md:38 msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset:" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" +"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " +"工作负载,而且还成功完成了它们。" -#: ../../source/tutorial-quickstart-mlx.rst:259 +#: ../../source/ref-telemetry.md:42 msgid "" -"Here, after updating the parameters, we perform the training as in the " -"centralized case, and return the new parameters." +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." 
msgstr "" +"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " +"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" -#: ../../source/tutorial-quickstart-mlx.rst:262 -msgid "And for the ``evaluate()`` method of the client:" +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" +"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" +" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/ref-telemetry.md:46 msgid "" -"We also begin by updating the parameters with the ones sent by the " -"server, and then we compute the loss and accuracy using the functions " -"defined above. In the constructor of the ``FlowerClient`` we instantiate " -"the `MLP` model as well as other components such as the optimizer." +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" +"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" +"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" -#: ../../source/tutorial-quickstart-mlx.rst:277 -#, fuzzy -msgid "Putting everything together we have:" -msgstr "把所有东西放在一起" +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." 
+msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" -#: ../../source/tutorial-quickstart-mlx.rst:331 +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "如何检查报告中的内容" + +#: ../../source/ref-telemetry.md:52 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that " -"``context`` enables you to get access to hyperparemeters defined in " -"``pyproject.toml`` to configure the run. In this tutorial we access, " -"among other hyperparameters, the ``local-epochs`` setting to control the " -"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " -"method." +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" +"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " +"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " +"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" -#: ../../source/tutorial-quickstart-mlx.rst:363 +#: ../../source/ref-telemetry.md:58 msgid "" -"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " -"an identical signature to that of ``client_fn()``, but the return type is" -" `ServerAppComponents `_ as " -"opposed to `Client `_. In this example we use the " -"``FedAvg`` strategy." 
-msgstr "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" -#: ../../source/tutorial-quickstart-mlx.rst:386 -#: ../../source/tutorial-quickstart-pytorch.rst:344 -#: ../../source/tutorial-quickstart-tensorflow.rst:266 +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "如何联系我们" + +#: ../../source/ref-telemetry.md:66 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" +"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" +"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" -#: ../../source/tutorial-quickstart-mlx.rst:390 -#, fuzzy +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"Check the `source code `_ of the extended version of this tutorial in ``examples" -"/quickstart-mlx`` in the Flower GitHub repository." -msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" +msgstr "快速入门 Android" + +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" +"The experimental Flower Android SDK is not compatible with the latest " +"version of Flower. 
Android support is currently being reworked and will " +"be released in 2025." +msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:4 -msgid "Quickstart Pandas" -msgstr "快速入门Pandas" +#: ../../source/tutorial-quickstart-android.rst:14 +msgid "" +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new Android SDK is released." +msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:9 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" +#: ../../source/tutorial-quickstart-android.rst:17 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" -#: ../../source/tutorial-quickstart-pandas.rst:11 +#: ../../source/tutorial-quickstart-android.rst:19 msgid "" "Please refer to the `full code example " -"`_ " -"to learn more." +"`_ to learn " +"more." msgstr "" -"请参阅 `完整代码示例 `_\" 了解更多信息。" +"请参阅`完整代码示例 " +"`_了解更多信息。" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "快速入门 fastai" -#: ../../source/tutorial-quickstart-pytorch.rst:6 +#: ../../source/tutorial-quickstart-fastai.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train a " -"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." 
msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-pytorch.rst:11 -msgid "" -"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" -" generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:26 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" -"Then, run the command below. You will be prompted to select one of the " -"available templates (choose ``PyTorch``), give a name to your project, " -"and type in your developer name:" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:117 -msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create dataloaders with the data" -" that correspond to their data partition." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:152 -msgid "" -"We defined a simple Convolutional Neural Network (CNN), but feel free to " -"replace it with a more sophisticated model if you'd like:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:177 -msgid "" -"In addition to defining the model architecture, we also include two " -"utility functions to perform both training (i.e. ``train()``) and " -"evaluation (i.e. ``test()``) using the above model. 
These functions " -"should look fairly familiar if you have some prior experience with " -"PyTorch. Note these functions do not have anything specific to Flower. " -"That being said, the training function will normally be called, as we'll " -"see later, from a Flower client passing its own data. In summary, your " -"clients can use standard training/testing functions to perform local " -"training or evaluation:" -msgstr "" +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "并激活虚拟环境:" -#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" -"The main changes we have to make to use `PyTorch` with `Flower` will be " -"found in the ``get_weights()`` and ``set_weights()`` functions. In " -"``get_weights()`` PyTorch model parameters are extracted and represented " -"as a list of NumPy arrays. The ``set_weights()`` function that's the " -"oposite: given a list of NumPy arrays it applies them to an existing " -"PyTorch model. Doing this in fairly easy in PyTorch." +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:282 -msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparemeters defined in your " -"``pyproject.toml`` to configure the run. In this tutorial we access the " -"`local-epochs` setting to control the number of epochs a ``ClientApp`` " -"will perform when running the ``fit()`` method. 
You could define " -"additioinal hyperparameters in ``pyproject.toml`` and access them here." +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-jax.rst:60 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-scikitlearn.rst:59 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +#: ../../source/tutorial-quickstart-xgboost.rst:492 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:309 +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-jax.rst:102 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-scikitlearn.rst:101 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +#: ../../source/tutorial-quickstart-xgboost.rst:537 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will server as " -"the global model to federated. Note that the value of ``fraction_fit`` is" -" read from the run config. You can find the default value defined in the " -"``pyproject.toml``." 
+"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:348 +#: ../../source/tutorial-quickstart-fastai.rst:108 #, fuzzy msgid "" -"Check the `source code `_ of the extended version of this tutorial in " -"``examples/quickstart-pytorch`` in the Flower GitHub repository." +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" "此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-pytorch.rst:354 -#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 #, fuzzy -msgid "Video tutorial" -msgstr "教程" - -#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" -"The video shown below shows how to setup a PyTorch + Flower project using" -" our previously recommended APIs. A new video tutorial will be released " -"that shows the new APIs (as the content above does)" -msgstr "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 -msgid "Quickstart PyTorch Lightning" -msgstr "快速入门 PyTorch Lightning" +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "🤗 Transformers快速入门" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#: ../../source/tutorial-quickstart-huggingface.rst:6 #, fuzzy msgid "" -"In this federated learning tutorial we will learn how to train an " -"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " -"recommended to create a virtual environment and run everything within a " -":doc:`virtualenv `." 
+"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"This will create a new directory called `quickstart-pytorch-lightning` " -"containing the following files:" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#, fuzzy msgid "" -"By default, Flower Simulation Engine will be started and it will create a" -" federation of 4 nodes using `FedAvg `_ " -"as the aggregation strategy. The dataset will be partitioned using Flower" -" Dataset's `IidPartitioner `_." -" To run the project, do:" -msgstr "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" -"Each simulated `ClientApp` (two per round) will also log a summary of " -"their local training process. 
Expect this output to be similar to:" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-jax.rst:32 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:31 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" -"Check the `source code `_ of this tutorial in ``examples" -"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-jax.rst:46 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:4 -msgid "Quickstart scikit-learn" -msgstr "scikit-learn快速入门" +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +#: ../../source/tutorial-quickstart-xgboost.rst:485 +msgid "To run the project, do:" +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:9 -#, fuzzy -msgid "" -"In this tutorial, we will learn how to train a ``Logistic Regression`` " -"model on MNIST using Flower and scikit-learn." +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " -"Regression` 模型。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-jax.rst:110 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." 
-msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:17 -msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-jax.rst:114 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#: ../../source/tutorial-quickstart-xgboost.rst:89 +#, fuzzy +msgid "The Data" +msgstr "加载数据" -#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. 
To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-jax.rst:128 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-scikitlearn.rst:138 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 #, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:36 -msgid "Or simply install all dependencies using Poetry:" -msgstr "或者直接使用 Poetry 安装所有依赖项:" +msgid "The Model" +msgstr "训练模型" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#: ../../source/tutorial-quickstart-huggingface.rst:173 #, fuzzy msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within ``utils.py``. The " -"``utils.py`` contains different functions defining all the machine " -"learning basics:" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" 
msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " -":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -#, fuzzy -msgid "``get_model_parameters()``" -msgstr ":code:`get_model_parameters()`" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " +"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -#, fuzzy -msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" -msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -#, fuzzy -msgid "``set_model_params()``" -msgstr ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." 
+" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#: ../../source/tutorial-quickstart-xgboost.rst:149 #, fuzzy -msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" -msgstr "设置:code:`sklean`的LogisticRegression模型的参数" +msgid "The ClientApp" +msgstr "客户端" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -#, fuzzy -msgid "``set_initial_params()``" -msgstr ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "初始化 Flower 服务器将要求的模型参数" +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. 
The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:58 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" -"Please check out ``utils.py`` `here " -"`_ for more details. The pre-defined functions are used in" -" the ``client.py`` and imported. The ``client.py`` also requires to " -"import several packages such as Flower and scikit-learn:" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -"更多详情请查看 :code:`utils.py`` 这里 " -"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " -"还需要导入几个软件包,如 Flower 和 scikit-learn:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:75 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -"``FederatedDataset.load_partition()`` method loads the partitioned " -"training set for each partition ID defined in the ``--partition-id`` " -"argument." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. 
In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -"在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " -"\"Flower Datasets " -"`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" -" 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-jax.rst:246 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-scikitlearn.rst:255 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#: ../../source/tutorial-quickstart-xgboost.rst:269 #, fuzzy +msgid "The ServerApp" +msgstr "服务器" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" -"Next, the logistic regression model is defined and initialized with " -"``utils.set_initial_params()``." -msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" -"The Flower server interacts with clients through an interface called " -"``Client``. 
When the server selects a particular client for training, it " -"sends training instructions over the network. The client receives those " -"instructions and calls one of the ``Client`` methods to run your code " -"(i.e., to fit the logistic regression we defined earlier)." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" -"Flower provides a convenience class called ``NumPyClient`` which makes it" -" easier to implement the ``Client`` interface when your workload uses " -"scikit-learn. Implementing ``NumPyClient`` usually means defining the " -"following methods (``set_parameters`` is optional though):" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " -"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "以 NumPy ndarrays 列表形式返回模型参数" +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." 
+msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 -#, fuzzy -msgid "``set_parameters`` (optional)" -msgstr ":code:`set_parameters` (可选)" +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" +msgstr "快速入门 iOS" + +#: ../../source/tutorial-quickstart-ios.rst:11 +msgid "" +"The experimental Flower iOS SDK is not compatible with the latest version" +" of Flower. iOS support is currently being reworked and will be released " +"in 2025." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:14 +msgid "" +"This quickstart tutorial is kept for historical purposes and will be " +"updated once the new iOS SDK is released." +msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "用从服务器接收到的参数更新本地模型参数" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#: ../../source/tutorial-quickstart-ios.rst:20 #, fuzzy -msgid "is directly imported with ``utils.set_model_params()``" -msgstr "直接导入 :code:`utils.set_model_params()`" +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." 
+msgstr "" +"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " +"`_ 中运行一切。对于在 iOS 中实现 " +"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:135 -msgid "set the local model weights" -msgstr "设置本地模型参数" +#: ../../source/tutorial-quickstart-ios.rst:25 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:136 -msgid "train the local model" -msgstr "训练本地模型" +#: ../../source/tutorial-quickstart-ios.rst:28 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:137 -#, fuzzy -msgid "return the updated local model weights" -msgstr "接收更新的本地模型参数" +#: ../../source/tutorial-quickstart-ios.rst:34 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. 
You" +" can do this by using pip:" +msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" + +#: ../../source/tutorial-quickstart-ios.rst:41 +msgid "Or Poetry:" +msgstr "或者Poetry:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:139 -msgid "test the local model" -msgstr "测试本地模型" +#: ../../source/tutorial-quickstart-ios.rst:48 +msgid "Flower Client" +msgstr "Flower 客户端" -#: ../../source/tutorial-quickstart-scikitlearn.rst:141 -msgid "The methods can be implemented in the following way:" -msgstr "这些方法可以通过以下方式实现:" +#: ../../source/tutorial-quickstart-ios.rst:50 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" +"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " +"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " +"中实现并存储。客户端实现如下:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#: ../../source/tutorial-quickstart-ios.rst:88 #, fuzzy msgid "" -"We can now create an instance of our class ``MnistClient`` and add one " -"line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
+msgstr "" +"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " +":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " +"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " +"`_ 以了解更多有关应用程序的信息。" + +#: ../../source/tutorial-quickstart-ios.rst:94 +#, fuzzy +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#: ../../source/tutorial-quickstart-ios.rst:102 #, fuzzy msgid "" -"That's it for the client. We only have to implement ``Client`` or " -"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" -" client of type ``NumPyClient`` you'll need to first call its " -"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " -"which server to connect to. In our case we can run the server and the " -"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " -"run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" pass to the client." +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." 
msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -" :code:`server_address`。" +"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " +"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " +"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " +"中完成。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +#: ../../source/tutorial-quickstart-ios.rst:120 +#, fuzzy msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." msgstr "" -"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" -"learn。" +"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " +"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-quickstart-ios.rst:126 #, fuzzy -msgid "``server.py``, import Flower and start the server:" -msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
+msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#: ../../source/tutorial-quickstart-ios.rst:141 +#, fuzzy +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" + +#: ../../source/tutorial-quickstart-ios.rst:149 #, fuzzy msgid "" -"The number of federated learning rounds is set in ``fit_round()`` and the" -" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " -"is called after each federated learning round and gives you information " -"about loss and accuracy. Note that we also make use of Flower Datasets " -"here to load the test split of the MNIST dataset for server-side " -"evaluation." +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " -"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" +"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " +":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " +"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" + +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "Flower Server" +msgstr "Flower 服务器" -#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#: ../../source/tutorial-quickstart-ios.rst:158 #, fuzzy msgid "" -"The ``main`` contains the server-side parameter initialization " -"``utils.set_initial_params()`` as well as the aggregation strategy " -"``fl.server.strategy:FedAvg()``. 
The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" msgstr "" -":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " -":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " -"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " -":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" +"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " +"Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:169 +msgid "Train the model, federated!" +msgstr "联邦训练模型!" -#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" "With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" -msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:264 -#: ../../source/tutorial-quickstart-xgboost.rst:598 +#: ../../source/tutorial-quickstart-ios.rst:179 msgid "" "Once the server is running we can start the clients in different " -"terminals. 
Open a new terminal and start the first client:" -msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:271 -#: ../../source/tutorial-quickstart-xgboost.rst:605 -msgid "Open another terminal and start the second client:" -msgstr "打开另一台终端,启动第二个客户端:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:277 -#: ../../source/tutorial-quickstart-xgboost.rst:611 -msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" -msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" +"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " +"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " +"`_。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#: ../../source/tutorial-quickstart-ios.rst:185 #, fuzzy msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in ``examples/sklearn-logreg-" -"mnist``." +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" +"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可在 " +":code:`examples/ios` 中找到。" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 -#, fuzzy +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a CNN model on CIFAR-10." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" -#: ../../source/tutorial-quickstart-tensorflow.rst:4 -msgid "Quickstart TensorFlow" -msgstr "快速入门 TensorFlow" +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "快速入门 JAX" -#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#: ../../source/tutorial-quickstart-jax.rst:6 #, fuzzy msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " -"all, it is recommended to create a virtual environment and run everything" -" within a :doc:`virtualenv `." +"In this federated learning tutorial we will learn how to train a linear " +"regression model using Flower and `JAX " +"`_. It is recommended to create a " +"virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-tensorflow.rst:11 +#: ../../source/tutorial-quickstart-jax.rst:11 msgid "" -"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " -"will generate all the files needed to run, by default with the Flower " -"Simulation Engine, a federation of 10 nodes using `FedAvg " -"`_. The " -"dataset will be partitioned using Flower Dataset's `IidPartitioner " -"`_." +"Let's use ``flwr new`` to create a complete Flower+JAX project. It will " +"generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_. A random " +"regression dataset will be loaded from scikit-learn's |makeregression|_ " +"function." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:26 +#: ../../source/tutorial-quickstart-jax.rst:24 msgid "" "Then, run the command below. 
You will be prompted to select one of the " -"available templates (choose ``TensorFlow``), give a name to your project," -" and type in your developer name:" -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -msgid "" -"This tutorial uses `Flower Datasets `_ " -"to easily download and partition the `CIFAR-10` dataset. In this example " -"you'll make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets. Each " -"``ClientApp`` will call this function to create the ``NumPy`` arrays that" -" correspond to their data partition." +"available templates (choose ``JAX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:141 +#: ../../source/tutorial-quickstart-jax.rst:116 msgid "" -"Next, we need a model. We defined a simple Convolutional Neural Network " -"(CNN), but feel free to replace it with a more sophisticated model if " -"you'd like:" +"This tutorial uses scikit-learn's |makeregression|_ function to generate " +"a random regression problem." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:170 +#: ../../source/tutorial-quickstart-jax.rst:130 msgid "" -"With `TensorFlow`, we can use the built-in ``get_weights()`` and " -"``set_weights()`` functions, which simplifies the implementation with " -"`Flower`. The rest of the functionality in the ClientApp is directly " -"inspired by the centralized case. The ``fit()`` method in the client " -"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " -"method is used to evaluate the model received on a held-out validation " -"set that the client might have:" +"We defined a simple linear regression model to demonstrate how to create " +"a JAX model, but feel free to replace it with a more sophisticated JAX " +"model if you'd like, (such as with NN-based `Flax " +"`_):" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:203 +#: ../../source/tutorial-quickstart-jax.rst:141 msgid "" -"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " -"defined above by means of a ``client_fn()`` callback. Note that the " -"`context` enables you to get access to hyperparameters defined in your " -"``pyproject.toml`` to configure the run. For example, in this tutorial we" -" access the `local-epochs` setting to control the number of epochs a " -"``ClientApp`` will perform when running the ``fit()`` method, in addition" -" to `batch-size`. You could define additional hyperparameters in " -"``pyproject.toml`` and access them here." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``evaluation()``) using the above model." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:234 +#: ../../source/tutorial-quickstart-jax.rst:172 msgid "" -"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" -" identical signature to that of ``client_fn()`` but the return type is " -"`ServerAppComponents `_ as " -"opposed to a `Client `_. In this example we use the " -"`FedAvg`. To it we pass a randomly initialized model that will serve as " -"the global model to federate." +"The main changes we have to make to use JAX with Flower will be found in " +"the ``get_params()`` and ``set_params()`` functions. In ``get_params()``," +" JAX model parameters are extracted and represented as a list of NumPy " +"arrays. 
The ``set_params()`` function is the opposite: given a list of " +"NumPy arrays it applies them to an existing JAX model." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:270 -#, fuzzy +#: ../../source/tutorial-quickstart-jax.rst:180 msgid "" -"Check the source code of the extended version of this tutorial in " -"|quickstart_tf_link|_ in the Flower GitHub repository." +"The ``get_params()`` and ``set_params()`` functions here are conceptually" +" similar to the ``get_weights()`` and ``set_weights()`` functions that we" +" defined in the :doc:`QuickStart PyTorch ` " +"tutorial." msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-tensorflow.rst:282 +#: ../../source/tutorial-quickstart-jax.rst:227 msgid "" -"The video shown below shows how to setup a TensorFlow + Flower project " -"using our previously recommended APIs. A new video tutorial will be " -"released that shows the new APIs (as the content above does)" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
-msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" - -#: ../../source/tutorial-quickstart-xgboost.rst:4 -msgid "Quickstart XGBoost" -msgstr "XGBoost快速入门" - -#: ../../source/tutorial-quickstart-xgboost.rst:13 -msgid "Federated XGBoost" -msgstr "联邦化 XGBoost" - -#: ../../source/tutorial-quickstart-xgboost.rst:15 +#: ../../source/tutorial-quickstart-jax.rst:248 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the ``FedAvg`` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``input_dim`` is read from the run config. You can find the default value" +" defined in the ``pyproject.toml``." msgstr "" -"EXtreme Gradient " -"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" -" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" - -#: ../../source/tutorial-quickstart-xgboost.rst:21 -msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." -msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" - -#: ../../source/tutorial-quickstart-xgboost.rst:25 -msgid "Why federated XGBoost?" -msgstr "为什么选择联邦 XGBoost?" 
-#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-jax.rst:276 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." -msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" - -#: ../../source/tutorial-quickstart-xgboost.rst:31 -msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"Congratulations! You've successfully built and run your first federated " +"learning system for JAX with Flower!" msgstr "" -"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " -"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" -#: ../../source/tutorial-quickstart-xgboost.rst:36 +#: ../../source/tutorial-quickstart-jax.rst:281 #, fuzzy msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"Check the source code of the extended version of this tutorial in " +"|quickstart_jax_link|_ in the Flower GitHub repository." 
msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " -"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " -"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" -"comprehensive `_),以运行各种实验。" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:46 -msgid "Environment Setup" -msgstr "环境设定" +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "快速入门 JAX" -#: ../../source/tutorial-quickstart-xgboost.rst:48 +#: ../../source/tutorial-quickstart-mlx.rst:6 #, fuzzy msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-xgboost.rst:51 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" -"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" -" go ahead and install ``xgboost``:" -msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" +"Then, run the command below. 
You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:67 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:71 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" -"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " -"and other related functions:" -msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" - -#: ../../source/tutorial-quickstart-xgboost.rst:99 -msgid "Dataset partition and hyper-parameter selection" -msgstr "数据集划分和超参数选择" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:101 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" -msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. 
You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:115 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" -"In this example, we split the dataset into 30 partitions with uniform " -"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " -"partition for the given client based on ``partition_id``:" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" -" :code:`node_id` 为给定客户端加载分区:" -#: ../../source/tutorial-quickstart-xgboost.rst:135 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for ``xgboost`` package." -msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:149 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" -"The functions of ``train_test_split`` and " -"``transform_dataset_to_dmatrix`` are defined as below:" -msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "Finally, we define the hyper-parameters used for XGBoost training." 
-msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:190 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" -"The ``num_local_round`` represents the number of iterations for local " -"tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " -"metric." +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " -"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" -#: ../../source/tutorial-quickstart-xgboost.rst:195 -msgid "Flower client definition for XGBoost" -msgstr "用于 XGBoost 的 Flower 客户端定义" - -#: ../../source/tutorial-quickstart-xgboost.rst:197 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define ``XgbClient`` class inherited from " -"``fl.client.Client``." +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " -":code:`XgbClient` 类。" -#: ../../source/tutorial-quickstart-xgboost.rst:219 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" -"All required parameters defined above are passed to ``XgbClient``'s " -"constructor." +"The rest of the functionality is directly inspired by the centralized " +"case. 
The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:221 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" -"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " -"insides ``XgbClient`` class as follows." +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." msgstr "" -"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " -":code:`evaluate` 方法如下。" -#: ../../source/tutorial-quickstart-xgboost.rst:236 -#, fuzzy -msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use ``get_parameters`` " -"and ``set_parameters`` to initialise model parameters for XGBoost. As a " -"result, let's return an empty tensor in ``get_parameters`` when it is " -"called by the server at the first round." +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " -":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " -":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" -#: ../../source/tutorial-quickstart-xgboost.rst:278 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " -"first set of trees. From the second round, we load the global model sent " -"from server to new build Booster object, and then update model weights on" -" local training data with function ``local_boost`` as follows:" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. 
In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." msgstr "" -"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " -"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " -":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" -#: ../../source/tutorial-quickstart-xgboost.rst:298 +#: ../../source/tutorial-quickstart-mlx.rst:277 #, fuzzy +msgid "Putting everything together we have:" +msgstr "把所有东西放在一起" + +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"Given ``num_local_round``, we update trees by calling " -"``bst_input.update`` method. After training, the last " -"``N=num_local_round`` trees will be extracted to send to the server." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -"给定 :code:`num_local_round`,我们通过调用 " -":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " -"树并发送给服务器。" -#: ../../source/tutorial-quickstart-xgboost.rst:330 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" -"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" -" function to conduct evaluation on valid set. The AUC value will be " -"returned." -msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." 
+msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:333 -#, fuzzy +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" -"Now, we can create an instance of our class ``XgbClient`` and add one " -"line to actually run this client:" -msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:350 +#: ../../source/tutorial-quickstart-mlx.rst:390 #, fuzzy msgid "" -"That's it for the client. We only have to implement ``Client`` and call " -"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " -"client which server to connect to. In our case we can run the server and " -"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" -" run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the ``server_address`` we" -" point the client at." +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." msgstr "" -"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:360 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." 
-msgstr "" -"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" -" FL。" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" -#: ../../source/tutorial-quickstart-xgboost.rst:364 -#, fuzzy -msgid "" -"In a file named ``server.py``, import Flower and FedXgbBagging from " -"``flwr.server.strategy``." -msgstr "" -"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " -"FedXgbBagging。" +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "快速入门Pandas" -#: ../../source/tutorial-quickstart-xgboost.rst:367 -msgid "We first define a strategy for XGBoost bagging aggregation." -msgstr "我们首先定义了 XGBoost bagging聚合策略。" +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" -#: ../../source/tutorial-quickstart-xgboost.rst:401 -#, fuzzy +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" -"We use two clients for this example. An ``evaluate_metrics_aggregation`` " -"function is defined to collect and wighted average the AUC values from " -"clients. The ``config_func`` function is to return the current FL round " -"number to client's ``fit()`` and ``evaluate()`` methods." +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " -"值并求取平均值。" - -#: ../../source/tutorial-quickstart-xgboost.rst:406 -msgid "Then, we start the server:" -msgstr "然后,我们启动服务器:" - -#: ../../source/tutorial-quickstart-xgboost.rst:418 -msgid "Tree-based bagging aggregation" -msgstr "基于树的bagging聚合" +"请参阅 `完整代码示例 `_\" 了解更多信息。" -#: ../../source/tutorial-quickstart-xgboost.rst:420 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"You must be curious about how bagging aggregation works. 
Let's look into " -"the details." -msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:422 +#: ../../source/tutorial-quickstart-pytorch.rst:6 #, fuzzy msgid "" -"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " -"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " -"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " -"``evaluate`` methods as follows:" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " -":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " -":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-xgboost.rst:519 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" -"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " -"trees by calling ``aggregate()`` function:" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." 
msgstr "" -"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" -" 树:" -#: ../../source/tutorial-quickstart-xgboost.rst:579 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -"``_get_tree_nums``. Then, the fetched information will be aggregated. " -"After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " -"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:584 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" - -#: ../../source/tutorial-quickstart-xgboost.rst:588 -msgid "Launch Federated XGBoost!" -msgstr "启动联邦 XGBoost!" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:664 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in ``metrics_distributed``." 
-" One can see that the average AUC increases over FL rounds." +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " -"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" -#: ../../source/tutorial-quickstart-xgboost.rst:668 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" -"The full `source code `_ for this example can be found in ``examples" -"/xgboost-quickstart``." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" - -#: ../../source/tutorial-quickstart-xgboost.rst:673 -msgid "Comprehensive Federated XGBoost" -msgstr "综合的联邦 XGBoost" -#: ../../source/tutorial-quickstart-xgboost.rst:675 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. 
Let's take a look!" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" -"comprehensive 示例 (`完整代码 " -"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" - -#: ../../source/tutorial-quickstart-xgboost.rst:685 -#, fuzzy -msgid "Cyclic training" -msgstr "集中式训练" -#: ../../source/tutorial-quickstart-xgboost.rst:687 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " -"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " -"树将传递给下一个客户端,作为下一轮提升的初始化模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:693 -#, fuzzy -msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" -msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" - -#: ../../source/tutorial-quickstart-xgboost.rst:733 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" -"The customised ``ClientManager`` samples all available clients in each FL" -" round based on the order of connection to the server. Then, we define a " -"new strategy ``FedXgbCyclic`` in " -"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " -":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " -":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" -#: ../../source/tutorial-quickstart-xgboost.rst:775 +#: ../../source/tutorial-quickstart-pytorch.rst:348 #, fuzzy msgid "" -"Unlike the original ``FedAvg``, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding ``aggregate_fit``." +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." 
msgstr "" -"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " -"将接收到的客户端模型复制为全局模型。" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:778 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 #, fuzzy +msgid "Video tutorial" +msgstr "教程" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" -"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" -" ensure the clients to be sequentially selected given FL round:" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" msgstr "" -"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " -"轮中按顺序选择客户:" -#: ../../source/tutorial-quickstart-xgboost.rst:840 -msgid "Customised data partitioning" -msgstr "定制数据分区" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "快速入门 PyTorch Lightning" -#: ../../source/tutorial-quickstart-xgboost.rst:842 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 #, fuzzy msgid "" -"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " -"instantiate the data partitioner based on the given ``num_partitions`` " -"and ``partitioner_type``. Currently, we provide four supported " -"partitioner type to simulate the uniformity/non-uniformity in data " -"quantity (uniform, linear, square, exponential)." +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." 
msgstr "" -"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " -":code:`num_partitions` 和 :code:`partitioner_type` " -"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-xgboost.rst:873 -msgid "Customised centralised/distributed evaluation" -msgstr "定制的集中/分布式评估" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:875 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" -"To facilitate centralised evaluation, we define a function in " -"``server_utils.py``:" -msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:907 -#, fuzzy +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 msgid "" -"This function returns a evaluation function which instantiates a " -"``Booster`` object and loads the global model weights to it. The " -"evaluation is conducted by calling ``eval_set()`` method, and the tested " -"AUC value is reported." +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. 
Expect this output to be similar to:" msgstr "" -"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " -":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" -#: ../../source/tutorial-quickstart-xgboost.rst:911 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 #, fuzzy msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the ``evaluate()`` method insides the " -"``XgbClient`` class in ``client_utils.py``." +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " -":code:`evaluate()` 方法。" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:916 -#, fuzzy -msgid "Flower simulation" -msgstr "运行模拟" +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:918 +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "scikit-learn快速入门" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:6 #, fuzzy msgid "" -"We also provide an example code (``sim.py``) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." -msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" +"In this federated learning tutorial we will learn how to train a Logistic" +" Regression on MNIST using Flower and scikit-learn. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." 
+msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-xgboost.rst:954 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"After importing all required packages, we define a ``main()`` function to" -" perform the simulation process:" -msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" +"Let's use ``flwr new`` to create a complete Flower+scikit-learn project. " +"It will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using |fedavg|_ The dataset " +"will be partitioned using |flowerdatasets|_'s |iidpartitioner|_" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1010 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a ``list``. After the simulation begins, the " -"clients won't need to pre-process their partitions again." -msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``sklearn``), give a name to your project, " +"and type in your developer name:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1014 -#, fuzzy -msgid "Then, we define the strategies and other hyper-parameters:" -msgstr "然后,我们定义策略和其他超参数:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `MNIST `_ dataset. In this" +" example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. Each ``ClientApp`` will call this function " +"to create dataloaders with the data that correspond to their data " +"partition." 
+msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1065 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:140 msgid "" -"After that, we start the simulation by calling " -"``fl.simulation.start_simulation``:" -msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" +"We define the |logisticregression|_ model from scikit-learn in the " +"``get_model()`` function:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1085 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 msgid "" -"One of key parameters for ``start_simulation`` is ``client_fn`` which " -"returns a function to construct a client. We define it as follows:" +"To perform the training and evaluation, we will make use of the " +"``.fit()`` and ``.score()`` methods available in the " +"``LogisticRegression`` class." msgstr "" -":code:`start_simulation` 的一个关键参数是 " -":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" -#: ../../source/tutorial-quickstart-xgboost.rst:1126 -msgid "Arguments parser" -msgstr "参数解析器" +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 +msgid "" +"The main changes we have to make to use scikit-learn with Flower will be " +"found in the ``get_model_params()``, ``set_model_params()``, and " +"``set_initial_params()`` functions. In ``get_model_params()``, the " +"coefficients and intercept of the logistic regression model are extracted" +" and represented as a list of NumPy arrays. In ``set_model_params()``, " +"that's the opposite: given a list of NumPy arrays it applies them to an " +"existing ``LogisticRegression`` model. Finally, in " +"``set_initial_params()``, we initialize the model parameters based on the" +" MNIST dataset, which has 10 classes (corresponding to the 10 digits) and" +" 784 features (corresponding to the size of the MNIST image array, which " +"is 28 × 28). Doing this is fairly easy in scikit-learn." 
+msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1128 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"In ``utils.py``, we define the arguments parsers for clients, server and " -"simulation, allowing users to specify different experimental settings. " -"Let's first see the sever side:" -msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" +"The rest of the functionality is directly inspired by the centralized " +"case:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1175 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:226 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with ``--centralised-eval``, the sever " -"will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"``context`` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" -"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" -#: ../../source/tutorial-quickstart-xgboost.rst:1180 -msgid "Then, the argument parser on client side:" -msgstr "然后是客户端的参数解析器:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:257 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a zero-initialized model that will " +"server as the global model to be federated. Note that the values of " +"``num-server-rounds``, ``penalty``, and ``local-epochs`` are read from " +"the run config. You can find the default values defined in the " +"``pyproject.toml``." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1234 -#, fuzzy +#: ../../source/tutorial-quickstart-scikitlearn.rst:295 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting ``--centralised-eval``, as well as an option to perform " -"scaled learning rate based on the number of clients by setting " -"``--scaled-lr``." -msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" +"Congratulations! You've successfully built and run your first federated " +"learning system in scikit-learn." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1239 +#: ../../source/tutorial-quickstart-scikitlearn.rst:300 #, fuzzy -msgid "We also have an argument parser for simulation:" -msgstr "我们还有一个用于模拟的参数解析器:" +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_sklearn_link|_ in the Flower GitHub repository." 
+msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:1317 +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 #, fuzzy -msgid "This integrates all arguments for both client and server sides." -msgstr "这整合了客户端和服务器端的所有参数。" +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:1320 -msgid "Example commands" -msgstr "命令示例" +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "快速入门 TensorFlow" -#: ../../source/tutorial-quickstart-xgboost.rst:1322 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 #, fuzzy msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" -msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-xgboost.rst:1329 -msgid "Then, on each client terminal, we start the clients:" -msgstr "然后,我们在每个客户终端上启动客户机:" +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." 
+msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1335 -#, fuzzy -msgid "To run the same experiment with Flower simulation:" -msgstr "运行与 Flower 模拟相同的实验:" +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1341 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" ``examples/xgboost-comprehensive``." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "从零开始制定策略" +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. 
In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and the Flower framework (`part 1 " -"`__) and we learned how strategies can be used to customize " -"the execution on both the server and the clients (`part 2 " -"`__)." +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__)。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg using " -"the Flower framework, Flower Datasets, and PyTorch." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." 
msgstr "" -"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " -"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Flower Discuss and the Flower Slack to connect, " -"ask questions, and get help: - `Join Flower Discuss " -"`__ We'd love to hear from you in the " -"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " -"Beginners``. - `Join Flower Slack `__ We'd " -"love to hear from you in the ``#introductions`` channel! If anything is " -"unclear, head over to the ``#questions`` channel." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " -"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 #, fuzzy -msgid "Let's build a new ``Strategy`` from scratch! 🌼" -msgstr "让我们从头开始构建一个新的``Strategy``!" 
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 -msgid "Preparation" -msgstr "准备工作" +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." -msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 -msgid "Installing dependencies" -msgstr "安装依赖项" +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." 
+msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 -msgid "First, we install the necessary packages:" -msgstr "首先,我们安装必要的软件包:" +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "XGBoost快速入门" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 -msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" +#: ../../source/tutorial-quickstart-xgboost.rst:7 +msgid "XGBoost" +msgstr "XGBoost" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +#: ../../source/tutorial-quickstart-xgboost.rst:9 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 -msgid "Data loading" -msgstr "数据加载" +"EXtreme Gradient " +"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" +" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." -msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " -"``DataLoader`` 中。" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." 
+msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 -msgid "Model training/evaluation" -msgstr "模型培训/评估" +#: ../../source/tutorial-quickstart-xgboost.rst:19 +#, fuzzy +msgid "Why Federated XGBoost?" +msgstr "为什么选择联邦 XGBoost?" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +#: ../../source/tutorial-quickstart-xgboost.rst:21 +#, fuzzy msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 -msgid "Flower client" -msgstr "Flower 客户端" +"As the demand for data privacy and decentralized learning grows, there's " +"an increasing requirement to implement federated XGBoost systems for " +"specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#: ../../source/tutorial-quickstart-xgboost.rst:25 #, fuzzy msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. 
Here, we also pass the " -"``partition_id`` to the client and use it log additional details. We then" -" create an instance of ``ClientApp`` and pass it the ``client_fn``." +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data privacy" +" is paramount. Given the robustness and efficiency of XGBoost, combining " +"it with federated learning offers a promising solution for these specific" +" challenges." msgstr "" -"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " -"传递给客户端,并使用它记录其他详细信息:" +"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " +"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 -msgid "Let's test what we have so far before we continue:" -msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "Environment Setup" +msgstr "环境设定" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 -msgid "Build a Strategy from scratch" -msgstr "从零开始构建策略" +#: ../../source/tutorial-quickstart-xgboost.rst:33 +#, fuzzy +msgid "" +"In this tutorial, we learn how to train a federated XGBoost model on the " +"HIGGS dataset using Flower and the ``xgboost`` package to perform a " +"binary classification task. We use a simple example (`full code xgboost-" +"quickstart `_) to demonstrate how federated XGBoost works, and then we " +"dive into a more complex comprehensive example (`full code xgboost-" +"comprehensive `_) to run various experiments." 
+msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " +"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " +"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" +"comprehensive `_),以运行各种实验。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +#: ../../source/tutorial-quickstart-xgboost.rst:42 +#, fuzzy msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"It is recommended to create a virtual environment and run everything " +"within a :doc:`virtualenv `." msgstr "" -"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " -"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" +"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +#: ../../source/tutorial-quickstart-xgboost.rst:45 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" +"We first need to install Flower and Flower Datasets. 
You can do this by " +"running :" +msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 -msgid "Recap" -msgstr "回顾" +#: ../../source/tutorial-quickstart-xgboost.rst:52 +#, fuzzy +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +#: ../../source/tutorial-quickstart-xgboost.rst:60 +#, fuzzy +msgid "The Configurations" +msgstr "配置值" + +#: ../../source/tutorial-quickstart-xgboost.rst:62 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." 
+"We define all required configurations / hyper-parameters inside the " +"``pyproject.toml`` file:" msgstr "" -"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " -"``Strategy`` " -"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#: ../../source/tutorial-quickstart-xgboost.rst:84 #, fuzzy msgid "" -"Before you continue, make sure to join the Flower community on Flower " -"Discuss (`Join Flower Discuss `__) and on " -"Slack (`Join Slack `__)." +"The ``local-epochs`` represents the number of iterations for local tree " +"boost. We use CPU for the training in default. One can assign it to a GPU" +" by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." msgstr "" -"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" +"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " +"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/tutorial-quickstart-xgboost.rst:91 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" -msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `HIGGS` dataset." 
+msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +#: ../../source/tutorial-quickstart-xgboost.rst:105 +#, fuzzy msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"In this example, we split the dataset into 20 partitions with uniform " +"distribution (`IidPartitioner `_)." +" Then, we load the partition for the given client based on " +"``partition_id``." msgstr "" -"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" +"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" +" :code:`node_id` 为给定客户端加载分区:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "自定义客户端" +#: ../../source/tutorial-quickstart-xgboost.rst:110 +#, fuzzy +msgid "" +"Subsequently, we train/test split using the given partition (client's " +"local data), and reformat data to DMatrix for the ``xgboost`` package." +msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#: ../../source/tutorial-quickstart-xgboost.rst:124 +#, fuzzy msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." 
+"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:151 +#, fuzzy +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Let's first see how we define " +"Flower client for XGBoost. We follow the general rule to define " +"``FlowerClient`` class inherited from ``fl.client.Client``." msgstr "" -"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " +":code:`XgbClient` 类。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/tutorial-quickstart-xgboost.rst:176 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"All required parameters defined above are passed to ``FlowerClient``'s " +"constructor." msgstr "" -"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " -"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" -" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +#: ../../source/tutorial-quickstart-xgboost.rst:178 #, fuzzy msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``! 
🌼" -msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 -msgid "Step 0: Preparation" -msgstr "步骤 0:准备工作" +"Then, we override ``fit`` and ``evaluate`` methods insides " +"``FlowerClient`` class as follows." +msgstr "" +"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " +":code:`evaluate` 方法如下。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +#: ../../source/tutorial-quickstart-xgboost.rst:217 #, fuzzy msgid "" -"Let's now define a loading function for the CIFAR-10 training and test " -"set, partition them into ``num_partitions`` smaller datasets (each split " -"into training and validation set), and wrap everything in their own " -"``DataLoader``." +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``_local_boost`` as follows:" msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " -"``DataLoader`` 中。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 -msgid "Step 1: Revisiting NumPyClient" -msgstr "步骤 1:重温 NumPyClient" +"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " +"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " +":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +#: ../../source/tutorial-quickstart-xgboost.rst:237 #, fuzzy msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``." 
+"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " -"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " -"``client_fn`` 的函数来创建该类的实例:" +"给定 :code:`num_local_round`,我们通过调用 " +":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " +"树并发送给服务器。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +#: ../../source/tutorial-quickstart-xgboost.rst:265 +#, fuzzy msgid "" -"Then, we define the function ``numpyclient_fn`` that is used by Flower to" -" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" -" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." -msgstr "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." +msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +#: ../../source/tutorial-quickstart-xgboost.rst:271 #, fuzzy msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Next, we configure the number of federated learning " -"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " -"config:" +"After the local training on clients, clients' model updates are sent to " +"the *server*, which aggregates them to produce a better model. Finally, " +"the *server* sends this improved model version back to each *client* to " +"complete a federated round." 
msgstr "" -"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " -"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" +"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" +" FL。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +#: ../../source/tutorial-quickstart-xgboost.rst:275 +#, fuzzy msgid "" -"Finally, we specify the resources for each client and run the simulation " -"to see the output we get:" +"In the file named ``server_app.py``, we define a strategy for XGBoost " +"bagging aggregation:" +msgstr "我们首先定义了 XGBoost bagging聚合策略。" + +#: ../../source/tutorial-quickstart-xgboost.rst:308 +#, fuzzy +msgid "" +"An ``evaluate_metrics_aggregation`` function is defined to collect and " +"wighted average the AUC values from clients. The ``config_func`` function" +" is to return the current FL round number to client's ``fit()`` and " +"``evaluate()`` methods." msgstr "" +"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " +"值并求取平均值。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +#: ../../source/tutorial-quickstart-xgboost.rst:313 #, fuzzy +msgid "Tree-based Bagging Aggregation" +msgstr "基于树的bagging聚合" + +#: ../../source/tutorial-quickstart-xgboost.rst:315 msgid "" -"This works as expected, ten clients are training for three rounds of " -"federated learning." -msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +#: ../../source/tutorial-quickstart-xgboost.rst:317 #, fuzzy msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. 
Whenever a client is selected to do some work, " -"``run_simulation`` launches the ``ClientApp`` object which in turn calls " -"the function ``numpyclient_fn`` to create an instance of our " -"``FlowerNumPyClient`` (along with loading the model and the data)." +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " -"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" +"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " +":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " +":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +#: ../../source/tutorial-quickstart-xgboost.rst:414 +#, fuzzy msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." 
+"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " -"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " -"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " -"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " -"只是建立在``Client``之上的便捷抽象类。" +"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" +" 树:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +#: ../../source/tutorial-quickstart-xgboost.rst:474 +#, fuzzy msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." -msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" +"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " +"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +#: ../../source/tutorial-quickstart-xgboost.rst:479 +#, fuzzy msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." -msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" +"After traversal of all clients' models, a new global model is generated, " +"followed by serialisation, and sending the global model back to each " +"client." 
+msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" +#: ../../source/tutorial-quickstart-xgboost.rst:483 +msgid "Launch Federated XGBoost!" +msgstr "启动联邦 XGBoost!" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +#: ../../source/tutorial-quickstart-xgboost.rst:533 +#, fuzzy msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``History (metrics, " +"distributed, evaluate)``. One can see that the average AUC increases over" +" FL rounds." +msgstr "" +"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " +"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +#: ../../source/tutorial-quickstart-xgboost.rst:547 +#, fuzzy msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. 
Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"Check the full `source code " +"`_ " +"for this example in ``examples/xgboost-quickstart`` in the Flower GitHub " +"repository." msgstr "" -"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " -"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " -"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " -"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +#: ../../source/tutorial-quickstart-xgboost.rst:552 +msgid "Comprehensive Federated XGBoost" +msgstr "综合的联邦 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:554 +#, fuzzy msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"Now that you know how federated XGBoost works with Flower, it's time to " +"run some more comprehensive experiments by customising the experimental " +"settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised / distributed evaluation. Let's take a look!" 
msgstr "" -"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " -"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " -"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" +"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" +"comprehensive 示例 (`完整代码 " +"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +#: ../../source/tutorial-quickstart-xgboost.rst:562 +#, fuzzy +msgid "Cyclic Training" +msgstr "集中式训练" + +#: ../../source/tutorial-quickstart-xgboost.rst:564 +#, fuzzy msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." 
msgstr "" -"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " -"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " -"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" -" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" +"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " +"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " +"树将传递给下一个客户端,作为下一轮提升的初始化模型。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 -msgid "Step 3: Custom serialization" -msgstr "步骤 3:自定义序列化" +#: ../../source/tutorial-quickstart-xgboost.rst:570 +#, fuzzy +msgid "To do this, we first customise a ``ClientManager`` in ``server_app.py``:" +msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +#: ../../source/tutorial-quickstart-xgboost.rst:610 +#, fuzzy msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to the " +"next client." +msgstr "" +"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " +":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " +":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +#: ../../source/tutorial-quickstart-xgboost.rst:652 +#, fuzzy msgid "" -"But first what is serialization? 
Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." msgstr "" -"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" -" Python 对象。" +"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " +"将接收到的客户端模型复制为全局模型。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +#: ../../source/tutorial-quickstart-xgboost.rst:655 +#, fuzzy msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." -msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" +"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " +"轮中按顺序选择客户:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 -msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." 
-msgstr "" -"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` " -"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 " -"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 -msgid "Our custom serialization/deserialization functions" -msgstr "我们的定制序列化/反序列化功能" +#: ../../source/tutorial-quickstart-xgboost.rst:685 +#, fuzzy +msgid "Customised Data Partitioning" +msgstr "定制数据分区" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +#: ../../source/tutorial-quickstart-xgboost.rst:687 +#, fuzzy msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"In ``task.py``, we use the ``instantiate_fds`` function to instantiate " +"Flower Datasets and the data partitioner based on the given " +"``partitioner_type`` and ``num_partitions``. Currently, we provide four " +"supported partitioner type to simulate the uniformity/non-uniformity in " +"data quantity (uniform, linear, square, exponential)." msgstr "" -"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 " -"``sparse_bytes_too_ndarray`` 中。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." 
-msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" +"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " +":code:`num_partitions` 和 :code:`partitioner_type` " +"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 -msgid "Client-side" -msgstr "客户端" +#: ../../source/tutorial-quickstart-xgboost.rst:726 +#, fuzzy +msgid "Customised Centralised / Distributed Evaluation" +msgstr "定制的集中/分布式评估" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +#: ../../source/tutorial-quickstart-xgboost.rst:728 +#, fuzzy msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" +"To facilitate centralised evaluation, we define a function in " +"``server_app.py``:" +msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +#: ../../source/tutorial-quickstart-xgboost.rst:759 +#, fuzzy msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"This function returns an evaluation function, which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." 
msgstr "" -"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " -"序列化从网络中获取的参数。" +"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " +":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +#: ../../source/tutorial-quickstart-xgboost.rst:763 +#, fuzzy msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_app.py``." msgstr "" -"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " -"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 -msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" +"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " +":code:`evaluate()` 方法。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 -msgid "Server-side" -msgstr "服务器端" +#: ../../source/tutorial-quickstart-xgboost.rst:768 +#, fuzzy +msgid "Arguments Explainer" +msgstr "参数解析器" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +#: ../../source/tutorial-quickstart-xgboost.rst:770 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." 
+"We define all hyper-parameters under ``[tool.flwr.app.config]`` entry in " +"``pyproject.toml``:" msgstr "" -"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " -"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "正如你所看到的,``evaluate``中只修改了一行:" +#: ../../source/tutorial-quickstart-xgboost.rst:799 +#, fuzzy +msgid "" +"On the server side, we allow user to specify training strategies / FL " +"rounds / participating clients / clients for evaluation, and evaluation " +"fashion. Note that with ``centralised-eval = true``, the sever will do " +"centralised evaluation and all functionalities for client evaluation will" +" be disabled." +msgstr "" +"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" +"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +#: ../../source/tutorial-quickstart-xgboost.rst:804 +#, fuzzy msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" +"On the client side, we can define various options for client data " +"partitioning. Besides, clients also have an option to conduct evaluation " +"on centralised test set by setting ``centralised-eval = true``, as well " +"as an option to perform scaled learning rate based on the number of " +"clients by setting ``scaled-lr = true``." 
+msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 -msgid "And then serialize the aggregated result:" -msgstr "然后将汇总结果序列化:" +#: ../../source/tutorial-quickstart-xgboost.rst:810 +#, fuzzy +msgid "Example Commands" +msgstr "命令示例" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 -msgid "We can now run our custom serialization example!" -msgstr "现在我们可以运行自定义序列化示例!" +#: ../../source/tutorial-quickstart-xgboost.rst:812 +msgid "To run bagging aggregation for 5 rounds evaluated on centralised test set:" +msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +#: ../../source/tutorial-quickstart-xgboost.rst:818 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"To run cyclic training with linear partitioner type evaluated on " +"centralised test set:" msgstr "" -"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " -"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +#: ../../source/tutorial-quickstart-xgboost.rst:827 +#, fuzzy msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 -msgid "`Read Flower Docs `__" -msgstr "阅读Flower文档 `__" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive`` in the Flower GitHub repository." +msgstr "" +"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +#: ../../source/tutorial-quickstart-xgboost.rst:833 #, fuzzy -msgid "`Check out Flower Code Examples `__" -msgstr "查看 Flower 代码示例 `__" +msgid "Video Tutorial" +msgstr "教程" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +#: ../../source/tutorial-quickstart-xgboost.rst:837 msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "使用 \"Flower Baselines \"进行研究 `__" +"The video shown below shows how to setup a XGBoost + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "从零开始制定策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 #, fuzzy msgid "" -"`Watch Flower AI Summit 2024 videos `__" -msgstr "观看 2023 年Flower峰会视频 `__" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "开始使用Flower" +"Welcome to the third part of the Flower federated learning tutorial. 
In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "欢迎阅读Flower联邦学习教程!" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " +"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 #, fuzzy msgid "" -"In this notebook, we'll build a federated learning system using the " -"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " -"for the model training pipeline and data loading. In part 2, we federate " -"the PyTorch project using Flower." +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! 
If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." msgstr "" -"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " -"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " +"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 #, fuzzy -msgid "Let's get started! 🌼" -msgstr "让我们开始吧!" +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "让我们从头开始构建一个新的``Strategy``!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "准备工作" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 msgid "" -"Before we begin with any actual code, let's make sure that we have " +"Before we begin with the actual code, let's make sure that we have " "everything we need." 
-msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" +msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 -#, fuzzy -msgid "Install dependencies" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" msgstr "安装依赖项" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 -#, fuzzy -msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "首先,我们安装必要的软件包:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 -#, fuzzy -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). 
Note, however, that Google Colab is not always able to " +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " "offer GPU acceleration. If you see an error related to GPU availability " "in one of the following sections, consider switching back to CPU-based " "execution by setting ``DEVICE = torch.device(\"cpu\")``. 
If the runtime " @@ -28451,5496 +28317,9511 @@ msgstr "" "``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " "加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 -#, fuzzy -msgid "Load the data" -msgstr "加载数据" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "数据加载" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." msgstr "" -"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " -"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 -#, fuzzy -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. 
Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (the data " -"is naturally partitioned)." -msgstr "" -"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " -"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "模型培训/评估" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 msgid "" -"Each organization will act as a client in the federated learning system. " -"Having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 -#, fuzzy -msgid "" -"We use the Flower Datasets library (``flwr-datasets``) to partition " -"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" -" small training and test set for each of the ten organizations and wrap " -"each of these into a PyTorch ``DataLoader``:" -msgstr "" -"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " -"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Flower 客户端" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 #, fuzzy msgid "" -"We now have a function that can return a training set and validation set " -"(``trainloader`` and ``valloader``) representing one dataset from one of " -"ten different organizations. Each ``trainloader``/``valloader`` pair " -"contains 4000 training examples and 1000 validation examples. There's " -"also a single ``testloader`` (we did not split the test set). Again, this" -" is only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
msgstr "" -"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " -"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " -"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" +"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " +"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " +"传递给客户端,并使用它记录其他详细信息:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 -#, fuzzy -msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " -"move on:" -msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "从零开始构建策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 msgid "" -"The output above shows a random batch of images from the ``trainloader`` " -"from the first of ten partitions. It also prints the labels associated " -"with each image (i.e., one of the ten possible labels we've seen above). " -"If you run the cell again, you should see another batch of images." +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." 
msgstr "" -"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " -"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "步骤 1:使用 PyTorch 进行集中训练" +"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " +"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." -msgstr "" -"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" -" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " -"minute blitz " -"`__。" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 -#, fuzzy -msgid "Define the model" -msgstr "定义模型" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "回顾" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"In this notebook, we’ve seen how to implement a custom strategy. 
A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." msgstr "" -"我们使用` PyTorch 教程 " -"`__ 中描述的简单 CNN:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 -msgid "Let's continue with the usual training and test functions:" -msgstr "让我们继续进行常规的训练和测试功能:" +"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " +"``Strategy`` " +"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 #, fuzzy -msgid "Train the model" -msgstr "训练模型" +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." 
+msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``partition_id=0``). This simulates the reality of most machine learning" -" projects today: each organization has their own data and trains models " -"only on this internal data:" -msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simple centralized training pipeline that sets " -"the stage for what comes next - federated learning!" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." 
msgstr "" -"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " -"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "步骤 2:使用 Flower 联邦学习" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "自定义客户端" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." -msgstr "" -"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " -"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -#, fuzzy -msgid "Update model parameters" -msgstr "更新模型参数" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 -#, fuzzy -msgid "" -"In federated learning, the server sends global model parameters to the " -"client, and the client updates the local model with parameters received " -"from the server. It then trains the model on the local data (which " -"changes the model parameters locally) and sends the updated/changed model" -" parameters back to the server (or, alternatively, it sends just the " -"gradients back to the server, not the full model parameters)." -msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" +"Welcome to the fourth part of the Flower federated learning tutorial. 
In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__),并从头开始构建了我们自己的定制策略(`part 3 " +"`__)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." 
msgstr "" -"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " -"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" +"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " +"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" +" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 #, fuzzy msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which the Flower ``NumPyClient`` knows how to " -"serialize/deserialize):" -msgstr "" -"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " -"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -#, fuzzy -msgid "Define the Flower ClientApp" -msgstr "Flower 客户端。" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "步骤 0:准备工作" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 #, fuzzy msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" -"side and client-side code, respectively." 
+"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." msgstr "" -"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " -"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " -"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "步骤 1:重温 NumPyClient" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 #, fuzzy msgid "" -"The first step toward creating a ``ClientApp`` is to implement a " -"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " -"use ``NumPyClient`` in this tutorial because it is easier to implement " -"and requires us to write less boilerplate. To implement ``NumPyClient``, " -"we create a subclass that implements the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." 
msgstr "" -"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " -"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " -"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters``: 返回当前本地模型参数" +"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " +"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " +"``client_fn`` 的函数来创建该类的实例:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 msgid "" -"``fit``: Receive model parameters from the server, train the model on the" -" local data, and return the updated model parameters to the server" -msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 #, fuzzy msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model on the local data, and return the evaluation result to the server" -msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. 
Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" +"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " +"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" msgstr "" -"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " -"客户端实现,它将一切都整合在一起:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 #, fuzzy msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient`` (one on each of the " -"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " -"the respective instance when the server selects a particular client for " -"training (and ``FlowerClient.evaluate`` for evaluation)." 
-msgstr "" -"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " -"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " -"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " -"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " -"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " -"``FlowerClient.evaluate``)。" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 #, fuzzy msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients *on a single machine*. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." 
msgstr "" -"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " -"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " -"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" +"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " +"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function that creates a ``FlowerClient`` instance on demand. We typically" -" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" -" needs an instance of one particular client to call ``fit`` or " -"``evaluate`` (those instances are usually discarded after use, so they " -"should not keep any local state). In federated learning experiments using" -" Flower, clients are identified by a partition ID, or ``partition-id``. " -"This ``partition-id`` is used to load different local data partitions for" -" different clients, as can be seen below. The value of ``partition-id`` " -"is retrieved from the ``node_config`` dictionary in the ``Context`` " -"object, which holds the information that persists throughout each " -"training round." +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. 
In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" -"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " -"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " -"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " -"``evaluate`` 时,它就会调用 " -"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " -"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" +"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " +"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " +"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " +"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " +"只是建立在``Client``之上的便捷抽象类。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 msgid "" -"With this, we have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. Last, but definitely not least, we" -" create an instance of ``ClientApp`` and pass it the ``client_fn``. " -"``ClientApp`` is the entrypoint that a running Flower client uses to call" -" your code (as defined in, for example, ``FlowerClient.fit``)." -msgstr "" -"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " -"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " -"``flwr.simulation.start_simulation`` 启动实际模拟。" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." 
+msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 -#, fuzzy -msgid "Define the Flower ServerApp" -msgstr "Flower 服务器。" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 msgid "" -"On the server side, we need to configure a strategy which encapsulates " -"the federated learning approach/algorithm, for example, *Federated " -"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " -"can also use our own strategy implementations to customize nearly all " -"aspects of the federated learning approach. For this example, we use the " -"built-in ``FedAvg`` implementation and customize it using a few basic " -"parameters:" -msgstr "" -"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " -"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 msgid "" -"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " -"function ``server_fn``. In ``server_fn``, we pass an instance of " -"``ServerConfig`` for defining the number of federated learning rounds " -"(``num_rounds``) and we also pass the previously created ``strategy``. " -"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" -" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " -"entrypoint that Flower uses to call all your server-side code (for " -"example, the strategy)." 
-msgstr "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 -#, fuzzy -msgid "Run the training" -msgstr "开始训练" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 msgid "" -"In simulation, we often want to control the amount of resources each " -"client can use. In the next cell, we specify a ``backend_config`` " -"dictionary with the ``client_resources`` key (required) for defining the " -"amount of CPU and GPU resources each client can access." +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." 
msgstr "" +"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " +"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " +"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " +"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 msgid "" -"The last step is the actual call to ``run_simulation`` which - you " -"guessed it - runs the simulation. ``run_simulation`` accepts a number of " -"arguments: - ``server_app`` and ``client_app``: the previously created " -"``ServerApp`` and ``ClientApp`` objects, respectively - " -"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" -" the number of clients for Flower simulation - ``backend_config``: the " -"resource allocation used in this simulation" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." msgstr "" +"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " +"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " +"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 -msgid "Behind the scenes" -msgstr "幕后" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" +"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " +"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " +"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" +" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "步骤 3:自定义序列化" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 -#, fuzzy, python-format +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 msgid "" -"When we call ``run_simulation``, we tell Flower that there are 10 clients" -" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." -" Flower then goes ahead an asks the ``ServerApp`` to issue an " -"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " -"knows that it should select 100% of the available clients " -"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " -"(i.e., 100% of 10)." 
-msgstr "" -"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " -"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" -" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 msgid "" -"Flower then asks the selected 10 clients to train the model. Each of the " -"10 ``ClientApp`` instances receives a message, which causes it to call " -"``client_fn`` to create an instance of ``FlowerClient``. It then calls " -"``.fit()`` on each the ``FlowerClient`` instances and returns the " -"resulting model parameter updates to the ``ServerApp``. When the " -"``ServerApp`` receives the model parameter updates from the clients, it " -"hands those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." msgstr "" -"然后,Flower 会要求选定的 10 " -"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 -msgid "Where's the accuracy?" -msgstr "准确度在哪里找?" 
+"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" +" Python 对象。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" -msgstr "" -"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " -"float(准确度)}``去哪儿了?" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." 
 msgstr ""
-"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 "
-"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。"
+"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` "
+"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 "
+"条目),将它们转换成稀疏矩阵可以大大减小它们的字节大小。"
 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576
+msgid "Our custom serialization/deserialization functions"
+msgstr "我们的定制序列化/反序列化功能"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578
 msgid ""
-"As users, we need to tell the framework how to handle/aggregate these "
-"custom metrics, and we do so by passing metric aggregation functions to "
-"the strategy. The strategy will then call these functions whenever it "
-"receives fit or evaluate metrics from clients. The two possible functions"
-" are ``fit_metrics_aggregation_fn`` and "
-"``evaluate_metrics_aggregation_fn``."
+"This is where the real serialization/deserialization will happen, "
+"especially in ``ndarray_to_sparse_bytes`` for serialization and "
+"``sparse_bytes_to_ndarray`` for deserialization."
 msgstr ""
-"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是"
-" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。"
+"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_to_sparse_bytes`` 和用于反序列化的 "
+"``sparse_bytes_to_ndarray`` 中。"
 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580
 msgid ""
-"Let's create a simple weighted averaging function to aggregate the "
-"``accuracy`` metric we return from ``evaluate``:"
-msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:"
+"Note that we imported the ``scipy.sparse`` library in order to convert "
+"our arrays."
+msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "客户端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." -msgstr "" -"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " -"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." 
 msgstr ""
-"其他两类指标(`losses_centralized`` 和 "
-"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。"
-
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351
-msgid "Final remarks"
-msgstr "结束语"
+"事实上,在 ``get_parameters`` 中,我们需要使用上文定义的自定义 ``ndarrays_to_sparse_parameters`` "
+"序列化从网络中获取的参数。"
 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674
 msgid ""
-"Congratulations, you just trained a convolutional neural network, "
-"federated over 10 clients! With that, you understand the basics of "
-"federated learning with Flower. The same approach you've seen can be used"
-" with other machine learning frameworks (not just PyTorch) and tasks (not"
-" just CIFAR-10 images classification), for example NLP with Hugging Face "
-"Transformers or speech with SpeechBrain."
+"In ``fit``, we first need to deserialize the parameters coming from the "
+"server using our custom ``sparse_parameters_to_ndarrays`` and then we "
+"need to serialize our local results with "
+"``ndarrays_to_sparse_parameters``."
 msgstr ""
-"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower "
-"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 "
-"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。"
+"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` "
+"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。"
 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676
 msgid ""
-"In the next notebook, we're going to cover some more advanced concepts. "
-"Want to customize your strategy? Initialize parameters on the server "
-"side? Or evaluate the aggregated model on the server side? We'll cover "
-"all this and more in the next tutorial."
-msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "服务器端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." msgstr "" -"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" +"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " +"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "使用联邦学习策略" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "正如你所看到的,``evaluate``中只修改了一行:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." 
+"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "然后将汇总结果序列化:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "现在我们可以运行自定义序列化示例!" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." msgstr "" -"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " -"`___)。" +"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " +"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. 
There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "阅读Flower文档 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 #, fuzzy +msgid "`Check out Flower Code Examples `__" +msgstr "查看 Flower 代码示例 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook again, using the Flower framework, " -"Flower Datasets, and PyTorch." -msgstr "" -"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " -"`PyTorch `__)。" +"`Use Flower Baselines for your research " +"`__" +msgstr "使用 \"Flower Baselines \"进行研究 `__" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 #, fuzzy -msgid "Let's move beyond FedAvg with Flower strategies! 🌼" -msgstr "让我们超越 FedAvg,采用Flower策略!" +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "观看 2023 年Flower峰会视频 `__" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "开始使用Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "欢迎阅读Flower联邦学习教程!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 #, fuzzy msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. 
We introduce a new parameter" -" ``num_partitions`` which allows us to call ``load_datasets`` with " -"different numbers of partitions." +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " -"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" +"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " +"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 -msgid "Strategy customization" -msgstr "策略定制" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "让我们开始吧!" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." -msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." 
+msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 -msgid "Server-side parameter **initialization**" -msgstr "服务器端参数 **初始化**" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "安装依赖项" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 #, fuzzy msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy. We create an " -"instance of ``Net()`` and get the paramaters as follows:" -msgstr "" -"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" -" 允许您直接将初始参数传递给策略:" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +#, fuzzy msgid "" -"Next, we create a ``server_fn`` that returns the components needed for " -"the server. Within ``server_fn``, we create a Strategy that uses the " -"initial parameters." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. 
If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 #, fuzzy -msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. In " -"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " -"defining the number of federated learning rounds (``num_rounds``)." -msgstr "" -"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " -"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " -"方法的任何调用。" +msgid "Load the data" +msgstr "加载数据" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +#, fuzzy msgid "" -"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " -"``server_fn``:" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." 
msgstr "" +"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " +"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +#, fuzzy msgid "" -"Last but not least, we specify the resources for each client and run the " -"simulation." +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." msgstr "" +"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " +"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 #, fuzzy msgid "" -"If we look closely, we can see that the logs do not show any calls to the" -" ``FlowerClient.get_parameters`` method." -msgstr "" -"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " -"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " -"方法的任何调用。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 -msgid "Starting with a customized strategy" -msgstr "从定制战略开始" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." 
+msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 #, fuzzy msgid "" -"We've seen the function ``run_simulation`` before. It accepts a number of" -" arguments, amongst them the ``server_app`` which wraps around the " -"strategy and number of training rounds, ``client_app`` which wraps around" -" the ``client_fn`` used to create ``FlowerClient`` instances, and the " -"number of clients to simulate which equals ``num_supernodes``." +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" msgstr "" -"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" +"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " +"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#, fuzzy msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" -msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 -msgid "Server-side parameter **evaluation**" -msgstr "服务器端参数**评估**" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. 
Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" +"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " +"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " +"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +#, fuzzy msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." -msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +#, fuzzy msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." 
-msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" +"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " +"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 -msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." 
-msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "步骤 1:使用 PyTorch 进行集中训练" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." msgstr "" -"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " -"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" +"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" +" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " +"minute blitz " +"`__。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "定义模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 msgid "" -"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " -"create a ``ServerApp`` that uses this strategy." 
+"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" +"我们使用` PyTorch 教程 " +"`__ 中描述的简单 CNN:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "让我们继续进行常规的训练和测试功能:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 #, fuzzy -msgid "Finally, we run the simulation." -msgstr "运行模拟" +msgid "Train the model" +msgstr "训练模型" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "向/从客户端发送/接收任意值" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. 
Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" msgstr "" -"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " -"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " -"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " -"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " -"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" +"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " +"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"Step 1 demonstrated a simple centralized training pipeline. 
All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." msgstr "" -"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " -"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" +"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " +"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "更新模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 #, fuzzy msgid "" -"Next, we'll pass this function to the FedAvg strategy before starting the" -" simulation:" -msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). 
We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " -"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" +"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " +"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" msgstr "" -"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " -"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" -" ``evaluate`` 中的第三个返回值。" +"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " +"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 -msgid "Scaling federated learning" -msgstr "扩大联邦学习的规模" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 客户端。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy msgid "" -"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" -" since the Context is defined by the ``num_supernodes`` argument in " -"``run_simulation()``." 
+"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 -#, fuzzy, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " -"available clients (so 25 clients) will be selected for training each " -"round:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters``: 返回当前本地模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local 
data, and return the evaluation result to the server" +msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " -"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " -"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " -"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" +"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " +"客户端实现,它将一切都整合在一起:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). 
Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " +"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " +"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " +"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " +"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " +"``FlowerClient.evaluate``)。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." msgstr "" -"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " -"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " -"个客户端!" 
+"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " +"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " +"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." 
msgstr "" -"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" +"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " +"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " +"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " +"``evaluate`` 时,它就会调用 " +"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " +"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "什么是联邦学习?" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " +"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " +"``flwr.simulation.start_simulation`` 启动实际模拟。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 服务器。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." 
+"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" msgstr "" -"在本教程中,你将了解什么是联邦学习,用 Flower " -"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" +"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " +"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." 
+msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "开始训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" -" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "让我们开始吧!" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. 
``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "经典机器学习" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "幕后" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." -msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." 
+msgstr "" +"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " +"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" +" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +#, fuzzy msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." -msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" +"然后,Flower 会要求选定的 10 " +"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|ac0a9766e26044d6aea222a829859b20|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "准确度在哪里找?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" +"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " +"float(准确度)}``去哪儿了?" 
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709
+msgid ""
+"Flower can automatically aggregate losses returned by individual clients,"
+" but it cannot do the same for metrics in the generic metrics dictionary "
+"(the one with the ``accuracy`` key). Metrics dictionaries can contain "
+"very different kinds of metrics and even key/value pairs that are not "
+"metrics at all, so the framework does not (and can not) know how to "
+"handle these automatically."
+msgstr ""
+"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 ``accuracy`` "
+"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711
+msgid ""
+"As users, we need to tell the framework how to handle/aggregate these "
+"custom metrics, and we do so by passing metric aggregation functions to "
+"the strategy. The strategy will then call these functions whenever it "
+"receives fit or evaluate metrics from clients. The two possible functions"
+" are ``fit_metrics_aggregation_fn`` and "
+"``evaluate_metrics_aggregation_fn``."
+msgstr ""
+"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是"
+" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713
+msgid ""
+"Let's create a simple weighted averaging function to aggregate the "
+"``accuracy`` metric we return from ``evaluate``:"
+msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781
+msgid ""
+"We now have a full system that performs federated training and federated "
+"evaluation. It uses the ``weighted_average`` function to aggregate custom"
+" evaluation metrics and calculates a single ``accuracy`` metric across "
+"all clients on the server side."
+msgstr ""
+"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` "
+"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783
+msgid ""
+"The other two categories of metrics (``losses_centralized`` and "
+"``metrics_centralized``) are still empty because they only apply when "
+"centralized evaluation is being used. Part two of the Flower tutorial "
+"will cover centralized evaluation."
+msgstr ""
+"其他两类指标(``losses_centralized`` 和 "
+"``metrics_centralized``)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795
+msgid "Final remarks"
+msgstr "结束语"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797
+msgid ""
+"Congratulations, you just trained a convolutional neural network, "
+"federated over 10 clients! With that, you understand the basics of "
+"federated learning with Flower. The same approach you've seen can be used"
+" with other machine learning frameworks (not just PyTorch) and tasks (not"
+" just CIFAR-10 images classification), for example NLP with Hugging Face "
+"Transformers or speech with SpeechBrain."
+msgstr ""
+"恭喜您,你刚刚训练了一个在 10 个客户端上进行联邦训练的卷积神经网络!这样,你就了解了使用 Flower "
+"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 "
+"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799
+msgid ""
+"In the next notebook, we're going to cover some more advanced concepts. "
+"Want to customize your strategy? Initialize parameters on the server "
+"side? Or evaluate the aggregated model on the server side? We'll cover "
+"all this and more in the next tutorial."
+msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" +"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "使用联邦学习策略" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" +"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " +"`___)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " +"`PyTorch `__)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#, fuzzy +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" +msgstr "让我们超越 FedAvg,采用Flower策略!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." 
+msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " +"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "策略定制" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "服务器端参数 **初始化**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" +" 允许您直接将初始参数传递给策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. 
In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "从定制战略开始" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." 
+msgstr "" +"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " +"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "服务器端参数**评估**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." 
+msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" +"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " +"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. 
Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "运行模拟" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "向/从客户端发送/接收任意值" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " +"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " +"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " +"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " +"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. 
We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " +"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " +"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." 
+msgstr "" +"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " +"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" +" ``evaluate`` 中的第三个返回值。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "扩大联邦学习的规模" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, fuzzy, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. 
We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" +"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " +"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " +"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " +"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" +"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " +"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " +"个客户端!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." 
+msgstr "" +"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "什么是联邦学习?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"在本教程中,你将了解什么是联邦学习,用 Flower " +"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#, fuzzy +msgid "" +"🧑‍🏫 This tutorial starts from zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" +" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "让我们开始吧!" 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +#, fuzzy +msgid "Classical Machine Learning" +msgstr "经典机器学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#, fuzzy +msgid "" +"Before we begin discussing federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|80152fa658904be08c849b4a594b76e1|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "模型和数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|35b60a1068f944ce937ac2988661aad5|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "使用数据训练模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#, fuzzy +msgid "" +"In practice, the training data we work with doesn't originate on the " +"machine we train the model on." +msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#, fuzzy +msgid "" +"This data gets created \"somewhere else\". 
For instance, the data can " +"originate on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." +msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|efead7f2c2224b60b7b42705004c15e6|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "手机上的数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"值得一提的是,这个 \"其他地方 " +"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|5421fee4e7ed450c903cbcd8a9d8a5d4|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "数据存在于多种设备中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#, fuzzy +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all this data on a central " +"server. This server can be located somewhere in a data center, or " +"somewhere in the cloud." 
+msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|811fcf35e9214bd5b4e613e41f7c0a27|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "集中数据收集" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|e61d38b0948f4c07a7257755f3799b54|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "集中模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "经典机器学习面临的挑战" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#, fuzzy +msgid "" +"This classical machine learning approach we've just seen can be used in " +"some cases. Great examples include categorizing holiday photos, or " +"analyzing web traffic. Cases, where all the data is naturally available " +"on a centralized server." +msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|e82c29351e2e480087c61b939eb7c041|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "可集中管理" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. 
Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|21ca40f4fb1a405c89098fd1d24880a4|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "无法集中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#, fuzzy +msgid "" +"There are many reasons why the classical centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" +msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#, fuzzy +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for machine learning training because those users live in " +"different parts of the world, and their data is governed by different " +"data protection regulations." +msgstr "" +"**法规**: " +"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. 
If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" +"**用户偏好**: " +"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" +"**数据量**: " +"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "集中式机器学习不起作用的例子包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#, fuzzy +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models." +msgstr "用多家医院的敏感医疗记录训练癌症检测模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#, fuzzy +msgid "" +"Financial information from different organizations to detect financial " +"fraud." 
+msgstr "不同组织的财务信息,以侦查财务欺诈行为" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +#, fuzzy +msgid "Location data from your electric car to make better range prediction." +msgstr "通过电动汽车的定位数据更好地预测续航里程" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +#, fuzzy +msgid "End-to-end encrypted messages to train better auto-complete models." +msgstr "端到端加密信息可训练出更好的自动完成模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"像 `Brave `__浏览器或 `Signal " +"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +#, fuzzy +msgid "Federated Learning" +msgstr "联邦学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#, fuzzy +msgid "" +"Federated Learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. 
Here's a one-liner explanation:" +msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +#, fuzzy +msgid "Centralized machine learning: move the data to the computation" +msgstr "集中式机器学习:将数据转移到计算中心" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +#, fuzzy +msgid "Federated (machine) Learning: move the computation to the data" +msgstr "联邦式(机器)学习:将计算转移到数据上" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#, fuzzy +msgid "" +"By doing so, Federated Learning enables us to use machine learning (and " +"other data science approaches) in areas where it wasn't possible before. " +"We can now train excellent medical AI models by enabling different " +"hospitals to work together. We can solve financial fraud by training AI " +"models on the data of different financial institutions. We can build " +"novel privacy-enhancing applications (such as secure messaging) that have" +" better built-in AI than their non-privacy-enhancing alternatives. And " +"those are just a few of the examples that come to mind. As we deploy " +"Federated Learning, we discover more and more areas that can suddenly be " +"reinvented because they now have access to vast amounts of previously " +"inaccessible data." +msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#, fuzzy +msgid "" +"So how does Federated Learning work, exactly? Let's start with an " +"intuitive explanation." 
+msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "联邦学习的五个步骤" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "步骤 0:初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|1351a2629c2c46d981b13b19f9fa45f0|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#, fuzzy +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts its" +" local training using the same model parameters. We often use only a few " +"of the connected nodes instead of all nodes. The reason for this is that " +"selecting more and more client nodes has diminishing returns." 
+msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|124c2c188b994c7ab1c862cfdb326923|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "发送全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" +"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" +"(mini-batches)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|42e1951c36f2406e93c7ae0ec5b299f9|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "根据本地数据进行训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "步骤 3:将模型参数更新返回服务器" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. 
The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|ec637b8a84234d068995ee1ccb2dd3b1|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "发送模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "步骤 4:将模型更新聚合到新的全局模型中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" +"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " +"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#, fuzzy +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. 
The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" +"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " +"*Federated Averaging* (`McMahan等人,2016 " +"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " +"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" +" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" +" 100 个示例的 10 倍。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|5bceb9d16b1a4d2db18d8a5b2f0cacb3|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "聚合模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" +"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " +"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. 
This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" +"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " +"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" +" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." 
+msgstr "" +"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" +" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +#, fuzzy +msgid "Federated Analytics" +msgstr "联邦分析" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." 
+msgstr "" +"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " +"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." +msgstr "" +"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " +"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " +"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|502b10044e864ca2b15282a393ab7faf|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +#, fuzzy +msgid "Final Remarks" +msgstr "结束语" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" +"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " +"构建一个简单的联邦学习系统。" + +#~ msgid "Before the release" +#~ msgstr "发布前" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " +#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." 
+#~ msgstr "" +#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " +#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " +#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" +#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" +#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " +#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "flwr(Python API 参考)" + +#~ msgid "..." +#~ msgstr "..." + +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "使用不安全的服务器连接启动客户端:" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "server.strategy.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "server.strategy.FedAvgM" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "可配置的 FedAvg 动量策略实施。" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "训练期间使用客户的比例。默认为 0.1。" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "server.strategy.FedMedian" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "server.strategy.FedOpt" + +#~ msgid "Configurable FedAdagrad strategy implementation." 
+#~ msgstr "可配置的 FedAdagrad 策略实施。" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "Federated Optim 策略界面。" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "server.strategy.FedProx" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "可配置的 FedProx 策略实施。" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "server.strategy.FedAdagrad" + +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "论文: https://arxiv.org/abs/2003.00295" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "server.strategy.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "server.strategy.FedYogi" + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "使用 Yogi 的自适应联合优化。" + +#~ msgid "Federated learning strategy using Yogi on server-side." +#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" + +#~ msgid "Paper: https://arxiv.org/abs/1803.01498" +#~ msgstr "论文:https://arxiv.org/abs/1803.01498" + +#~ msgid "server.strategy.Krum" +#~ msgstr "server.strategy.Krum" + +#~ msgid "Configurable Krum strategy implementation." +#~ msgstr "可配置的 Krum 策略实施。" + +#~ msgid "server.strategy.Bulyan" +#~ msgstr "server.strategy.Bulyan" + +#~ msgid "Bulyan strategy implementation." +#~ msgstr "Bulyan策略的实施。" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "server.strategy.FedXgbNnAvg" + +#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." 
+#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "server.strategy.DPFedAvgAdaptive" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" +#~ "**修复策略的错误返回类型** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" +#~ "两个方法(\"aggregate_fit \"和 " +#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ msgstr "" +#~ "** 更新 Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" +#~ "对于客户端就需要做这么多。我们仅需要实现 " +#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" +#~ " :code:`\"0.0.0.0:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" +#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " +#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " +#~ "即可。字符串 :code:`\"[::]:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " +#~ "``DataLoader`` 来包装由此产生的分割集:" + +#~ msgid "|e1dd4b4129b040bea23a894266227080|" +#~ msgstr "|e1dd4b4129b040bea23a894266227080|" + +#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" + +#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" + +#~ msgid 
"|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" + +#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" + +#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" + +#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" + +#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" + +#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" + +#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" + +#~ msgid "|88002bbce1094ba1a83c9151df18f707|" +#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" + +#~ msgid "|391766aee87c482c834c93f7c22225e2|" +#~ msgstr "|391766aee87c482c834c93f7c22225e2|" + +#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgstr "|93b9a15bd27f4e91b40f642c253dfaac|" + +#~ msgid "|a23d9638f96342ef9d25209951e2d564|" +#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" + +#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.6.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" +#~ "将``!pip install -q 'flwr[simulation]' torch" +#~ " torchvision matplotlib``更改为``!pip install -q " +#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " +#~ "torchvision matplotlib``" + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. 
Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_numpy_client()` by pointing " +#~ "it at the same IP address we " +#~ "used in :code:`server.py`:" +#~ msgstr "" +#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" +#~ " :code:`cifar.py` 加载数据和模型。使用函数 " +#~ ":code:`fl.client.start_numpy_client()` 启动 " +#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " +#~ "IP 地址:" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_numpy_client" +#~ " `_)" +#~ " in the sense that they can be" +#~ " configure by creating a class " +#~ "inheriting, for example, from " +#~ "`flwr.client.NumPyClient `_ and therefore " +#~ "behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" +#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " +#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " +#~ "`_\" " +#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " +#~ "管理的客户端还包括:" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "实例: PyTorch 和 MNIST 的演练" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "准备...设置...训练!" 
+ +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的 `Basic MNIST Example " +#~ "`_。您会发现用 " +#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" +#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "现在,让我们看看里面到底发生了什么。" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. 
Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " +#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" +#~ " 数据集上有独立的数据。" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. 
We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" +#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " +#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " +#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +#~ ":code:`fl.client.start_client`。" + +#~ msgid "A Closer Look" +#~ msgstr "仔细看一下" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" +#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " +#~ "下找到,现复制如下。它与 `Basic MNIST Example " +#~ "`_中的网络相同。" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " +#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " +#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " +#~ ":code:`evaluate` " +#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" 
+ +#~ msgid "Give It a Try" +#~ msgstr "试试看" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "通过上面的快速入门代码描述,你将对 Flower " +#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" +#~ " Flower 的经验:" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" + +#~ msgid "Differential privacy" +#~ msgstr "差别隐私" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." 
+#~ msgstr "" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " +#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +#~ "框架中定义的训练模式中。" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." 
+#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "简化假设" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " +#~ ":math:`(\\epsilon,\\delta)` 。" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "可定制的噪声注入" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" + +#~ msgid "Wrapper-based approach" +#~ msgstr "基于封装的方法" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." 
+#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "在现有工作负载中引入 DP " +#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +#~ ":code:`Strategy` 和 :code:`NumPyClient` " +#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" +#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." 
+#~ msgstr "" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +#~ ":code:`__init__()` " +#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +#~ " :code:`DPFedAvgFixed` 和 " +#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" +#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " +#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " +#~ "下键入)进行扩充。并且,如果 " +#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " +#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " +#~ "所返回的结果进行后处理。" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: " +#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " +#~ "1,强制以不加权的方式平均更新。此外,如果 " +#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" +#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " +#~ "之前,对参数进行*预*处理。" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. 
The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " +#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " +#~ "返回的 config 字典,并在其中添加键-值对 " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " +#~ ":code:`fit()` " +#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" +#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." 
+#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " +#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" +#~ " 值,可以使用下面的脚本。" + +#~ msgid "Flower driver SDK." +#~ msgstr "Flower 服务器。" + +#~ msgid "driver" +#~ msgstr "服务器" + +#~ msgid "Get task results." +#~ msgstr "汇总训练结果。" + +#~ msgid "Request for run ID." +#~ msgstr "Flower 基线申请" + +#~ msgid "Get client IDs." +#~ msgstr "返回客户端(本身)。" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" +#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +#~ "`_。" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "快速入门 TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "传统示例 (`flwr_example`)" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " +#~ "`examples `_" +#~ " 中提供。" + +#~ msgid "Extra Dependencies" +#~ msgstr "额外依赖" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." 
+#~ msgstr "" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " +#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" + +#~ msgid "For PyTorch examples::" +#~ msgstr "PyTorch 示例::" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "TensorFlow 示例::" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "PyTorch 和 TensorFlow 示例::" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +#~ ":code:`[tool.poems.extras]`)。" + +#~ msgid "PyTorch Examples" +#~ msgstr "PyTorch 示例" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "CIFAR-10 图像分类" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" +#~ "CIFAR-10 和 CIFAR-100 " +#~ "``_ 是流行的 RGB" +#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " +#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" + +#~ msgid "First, start a Flower server:" +#~ msgstr "首先,启动 Flower 服务器:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "然后,在新的终端窗口中启动两个客户端:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." 
+#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 图像分类" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "ImageNet-2012 `_ " +#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " +#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" + +#~ msgid "TensorFlow Examples" +#~ msgstr "TensorFlow 示例" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Fashion-MNIST 图像分类" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." 
+#~ msgstr "" +#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" +#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" + +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "基本图像标签" + +#~ msgid "The image tag of the base image." +#~ msgstr "基础图像的图像标记。" + +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." +#~ msgstr "" +#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " +#~ ":ref:`changelogentry` 附录中的一些示例和细节。" + +#~ msgid "Open a PR (as shown above)" +#~ msgstr "打开 PR(如上图所示)" + +#~ msgid "How to write a good PR title" +#~ msgstr "如何撰写好的公关标题" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" + +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. 
Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " +#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " +#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" +#~ " 遵守语法规则,以确保清晰。" + +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" + +#~ msgid "Implement Algorithm" +#~ msgstr "执行算法" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "在代码库中添加 my_new_file.py" + +#~ msgid "Improve code in module" +#~ msgstr "改进模块中的代码" + +#~ msgid "Change SomeModule" +#~ msgstr "更改 SomeModule" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "移除不必要的 XGBoost 依赖性" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "删除 FedAvg 子类化策略中的多余属性" + +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "添加新的惊人库,用于改进模拟引擎" + +#~ msgid "Changelog entry" +#~ msgstr "更新日志" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." 
+#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" + +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" + +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" + +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." +#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" + +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" + +#~ msgid " is for classifying a PR as a general improvement." +#~ msgstr " 用于将 PR 划分为一般改进。" + +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "表示不将 PR 添加到更新日志中" + +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr " 是指在 PR 中添加一般基线更改" + +#~ msgid " is to add a general examples change to the PR" +#~ msgstr " 是在 PR 中添加对一般示例的修改" + +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" + +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" + +#~ msgid "Note that only one token should be used." +#~ msgstr "请注意,只能使用一个标记。" + +#~ msgid "" +#~ "Its content must have a specific " +#~ "format. 
We will break down what " +#~ "each possibility does:" +#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." +#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." 
+#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" + +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "示例: MXNet - 运行联邦式 MXNet" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." +#~ msgstr "" +#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" +#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " +#~ "`PyTorch - 从集中式到联邦式 " +#~ "`_ 教程类似的示例结构。MXNet" +#~ " 和 PyTorch 非常相似,参考 `此处 " +#~ "`_对 MXNet " +#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " +#~ "`" +#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" + +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "使用 MXNet 进行 MNIST 训练" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." 
+#~ msgstr "" +#~ "首先,我们将简要介绍基于 :code:`Sequential` " +#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " +#~ "`_。" + +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." +#~ msgstr "" +#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " +#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " +#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" + +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" +#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " +#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. 
Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." +#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " +#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" + +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" +#~ "到目前为止,如果你以前使用过 MXNet(甚至 " +#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" + +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet 结合 Flower" + +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" +#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " +#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " +#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" + +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. 
" +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." +#~ msgstr "" +#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" +#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " +#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " +#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " +#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. 
:code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" +#~ "实现 Flower *client*基本上意味着要实现 " +#~ ":code:`flwr.client.Client` 或 " +#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " +#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" +#~ " 或 MXNet),:code:`NumPyClient` 比 " +#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " +#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" + +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" +#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " +#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." 
+#~ msgstr "" +#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " +#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " +#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" +#~ " :code:`NumPyClient` 子类告知 Flower " +#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "模型和数据" +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." +#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " +#~ ":code:`Sequential` 模型。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 -msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|36cd6e248b1443ce8a82b5a025bba368|" -msgstr "" +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" 
+#~ msgstr "" +#~ "此示例的完整源代码在:\"MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" +#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "使用数据训练模型" +#~ msgid "with the following command sequence:" +#~ msgstr "使用以下命令序列:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." -msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" +#~ msgid "" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" -msgstr "" +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." 
+#~ msgstr "" +#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " +#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "手机上的数据" +#~ msgid "Flower server" +#~ msgstr "Flower 服务器" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 -msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." -msgstr "" -"值得一提的是,这个 \"其他地方 " -"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" -msgstr "" +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "数据存在于多种设备中" +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." -msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" +#~ msgid "Start a Flower Driver API server." 
+#~ msgstr "启动基于 Ray 的Flower模拟服务器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|7605632e1b0f49599ffacf841491fcfb|" -msgstr "" +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" +#~ "Flower 1.0: ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "集中数据收集" +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|91b1b5a7d3484eb7a2350c1923f18307|" -msgstr "" +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "server.strategy.Strategy" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "集中模型训练" +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "如果已连接,请断开与超级链接的连接。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "经典机器学习面临的挑战" +#~ msgid "start\\_driver" +#~ msgstr "启动客户端" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 -msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. 
Cases, where all the data is naturally available on a " -"centralized server." -msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." +#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|5405ed430e4746e28b083b146fb71731|" -msgstr "" +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "可集中管理" +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_server` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 -msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." -msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" +#~ msgid "The Driver object to use." 
+#~ msgstr "要使用的驱动程序对象。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|a389e87dab394eb48a8949aa2397687b|" -msgstr "" +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "启动不安全的服务器:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "无法集中" +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "启动支持 SSL 的服务器:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." -msgstr "" -"**法规**: " -"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" +#~ msgid "Run Simulation Engine from the CLI." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 -msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." -msgstr "" -"**用户偏好**: " -"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "运行模拟" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." -msgstr "" -"**数据量**: " -"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." 
+#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "集中式机器学习不起作用的例子包括:" +#~ msgid "Quickstart MXNet" +#~ msgstr "快速入门 MXNet" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "用多家医院的敏感医疗记录训练癌症检测模型" +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "不同组织的财务信息,以侦查财务欺诈行为" +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "通过电动汽车的定位数据更好地预测续航里程" +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "端到端加密信息可训练出更好的自动完成模型" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." 
+#~ msgstr "" +#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " +#~ "MXNet 的 `手写数字识别教程 " +#~ "`_\"。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." -msgstr "" -"像 `Brave `__浏览器或 `Signal " -"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "联邦学习" +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 -msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" -msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." 
+#~ msgstr "" +#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" +#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "集中式机器学习:将数据转移到计算中心" +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "联邦式(机器)学习:将计算转移到数据上" +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." +#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 -msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." 
-msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." -msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "联邦学习的五个步骤" +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." +#~ msgstr "" +#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " +#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " +#~ "客户端。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "步骤 0:初始化全局模型" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." 
+#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 -msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." -msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" +#~ msgid "They can be implemented in the following way:" +#~ msgstr "它们可以通过以下方式实现:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|89c412136a5146ec8dc32c0973729f12|" -msgstr "" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "初始化全局模型" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" +#~ " " +#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ " :code:`server_address`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" -msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." -msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." 
+#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可在 :code:`examples/quickstart-" +#~ "mxnet` 中找到。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|9503d3dc3a144e8aa295f8800cd8a766|" -msgstr "" +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "发送全局模型" +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "使用 OpenML 加载 MNIST 数据集" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 -msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" -msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 -msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." 
-msgstr "" -"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" -"(mini-batches)。" +#~ msgid "Shuffles data and its label" +#~ msgstr "对数据及其标签进行洗牌" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" -msgstr "" +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "根据本地数据进行训练" +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "将数据集分割成多个分区" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "步骤 3:将模型参数更新返回服务器" +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" +#~ "我们从 `OpenML `_ 中加载 " +#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " +#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " +#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." 
-msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" +#~ msgid "Let's get stated!" +#~ msgstr "让我们开始吧!" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a7579ad7734347508e959d9e14f2f53d|" -msgstr "" +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "发送模型参数更新" +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "步骤 4:将模型更新聚合到新的全局模型中" +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" -msgstr "" -"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " -"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. 
To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." -msgstr "" -"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " -"*Federated Averaging* (`McMahan等人,2016 " -"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " -"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" -" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" -" 100 个示例的 10 倍。" +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|73d15dd1d4fc41678b2d54815503fbe8|" -msgstr "" +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "聚合模型参数更新" +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." 
-msgstr "" -"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " -"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." -msgstr "" -"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " -"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" -msgstr "" -"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" -" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. 
This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." -msgstr "" -"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" -" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "联邦分析" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." 
-msgstr "" -"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " -"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "目前,Flower " +#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" +#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" +#~ " Flower 服务器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Flower" +#~ msgid "``3.11``" +#~ msgstr "``1.0.0rc1``" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" -"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " -"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " -"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" +#~ msgid "Defaults to ``22.04``." 
+#~ msgstr "默认为 ``22.04``。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|55472eef61274ba1b739408607e109df|" -msgstr "" +#~ msgid "Building the SuperLink image" +#~ msgstr "启动服务器" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "默认为 ``flwr/server``。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" +#~ msgid "The Python version of the base image." +#~ msgstr "基础镜像的存储库名称。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "默认为 ``22.04``。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" -msgstr "" -"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" -"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " -"构建一个简单的联邦学习系统。" +#~ msgid "Defaults to ``flwr``." 
+#~ msgstr "默认为 ``flwr/server``。" -#~ msgid "Before the release" -#~ msgstr "发布前" +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" + +#~ msgid "Creating New Messages" +#~ msgstr "创建新信息" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" -#~ msgstr "" -#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " -#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" -#~ msgstr "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." +#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." + +#~ msgid "Server's side:" +#~ msgstr "在服务器端:" + +#~ msgid "Client's side:" +#~ msgstr "在客户端:" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." 
-#~ msgstr "" -#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " -#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" + +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "协议缓冲区的信息类型" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." #~ msgstr "" -#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " -#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" +#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " +#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" +#~ " `_。" + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "在 :code:`ServerMessage` 代码块中:" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "在 ClientMessage 代码块中:" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). 
If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" -#~ msgstr "" -#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" -#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " -#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" -#~ msgid "flwr (Python API reference)" -#~ msgstr "flwr(Python API 参考)" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "完成后,我们将使用:" -#~ msgid "..." -#~ msgstr "..." +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "如果编译成功,你应该会看到以下信息:" -#~ msgid "Starting a client with an insecure server connection:" -#~ msgstr "使用不安全的服务器连接启动客户端:" +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "序列化和反序列化函数" -#~ msgid "server.strategy.FedAvg" -#~ msgstr "server.strategy.FedAvg" +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." +#~ msgstr "" +#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " +#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" +#~ " :code:`serde.py` 中添加这些函数。" -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "server.strategy.FedAvgM" +#~ msgid "The four functions:" +#~ msgstr "四种函数:" -#~ msgid "Configurable FedAvg with Momentum strategy implementation." -#~ msgstr "可配置的 FedAvg 动量策略实施。" +#~ msgid "Sending the Message from the Server" +#~ msgstr "从服务器发送信息" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." 
-#~ msgstr "训练期间使用客户的比例。默认为 0.1。" +#~ msgid "" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." -#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "由客户端接收信息" -#~ msgid "server.strategy.FedMedian" -#~ msgstr "server.strategy.FedMedian" +#~ msgid "" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" +#~ msgstr "" +#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" +#~ " :code:`example_response` 函数。记住使用 serde 函数!" -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" +#~ msgid "Within the handle function:" +#~ msgstr "在句柄函数内:" -#~ msgid "server.strategy.FedOpt" -#~ msgstr "server.strategy.FedOpt" +#~ msgid "And add a new function:" +#~ msgstr "并增加一个新函数:" -#~ msgid "Configurable FedAdagrad strategy implementation." -#~ msgstr "可配置的 FedAdagrad 策略实施。" +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "希望您在运行程序时能得到预期的结果!" -#~ msgid "Federated Optim strategy interface." -#~ msgstr "Federated Optim 策略界面。" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#~ msgid "server.strategy.FedProx" -#~ msgstr "server.strategy.FedProx" +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" -#~ msgid "Configurable FedProx strategy implementation." 
-#~ msgstr "可配置的 FedProx 策略实施。" +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "server.strategy.FedAdagrad" +#~ msgid "Run Flower server (Fleet API)." +#~ msgstr "Flower 服务器。" -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" -#~ msgstr "论文: https://arxiv.org/abs/2003.00295" +#~ msgid "Unreleased" +#~ msgstr "版本发布" -#~ msgid "Federated learning strategy using Adagrad on server-side." -#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" -#~ msgid "server.strategy.FedAdam" -#~ msgstr "server.strategy.FedAdam" +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "server.strategy.FedYogi" +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "使用 Yogi 的自适应联合优化。" +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" -#~ msgid "Federated learning strategy using Yogi on server-side." -#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" -#~ msgid "Paper: https://arxiv.org/abs/1803.01498" -#~ msgstr "论文:https://arxiv.org/abs/1803.01498" +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" -#~ msgid "server.strategy.Krum" -#~ msgstr "server.strategy.Krum" +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" -#~ msgid "Configurable Krum strategy implementation." 
-#~ msgstr "可配置的 Krum 策略实施。" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" -#~ msgid "server.strategy.Bulyan" -#~ msgstr "server.strategy.Bulyan" +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" -#~ msgid "Bulyan strategy implementation." -#~ msgstr "Bulyan策略的实施。" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" -#~ msgid "server.strategy.FedXgbNnAvg" -#~ msgstr "server.strategy.FedXgbNnAvg" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" -#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." -#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" -#~ msgid "server.strategy.DPFedAvgAdaptive" -#~ msgstr "server.strategy.DPFedAvgAdaptive" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" -#~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" -#~ msgstr "" -#~ "**修复策略的错误返回类型** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" + +#~ msgid "Edge Client Engine" +#~ msgstr "边缘客户端引擎" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." 
-#~ msgstr "" -#~ "两个方法(\"aggregate_fit \"和 " -#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "具有边缘客户端引擎的`Flower `核心架构" -#~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" -#~ msgstr "" -#~ "** 更新 Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ msgid "Virtual Client Engine" +#~ msgstr "虚拟客户端引擎" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." -#~ msgstr "" -#~ "对于客户端就需要做这么多。我们仅需要实现 " -#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" -#~ " :code:`\"0.0.0.0:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" -#~ " server_address 。" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "具有虚拟客户端引擎的`Flower `核心架构" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. 
The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." -#~ msgstr "" -#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " -#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " -#~ "即可。字符串 :code:`\"[::]:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" -#~ " server_address 。" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" -#~ msgstr "" -#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " -#~ "``DataLoader`` 来包装由此产生的分割集:" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" -#~ msgid "|e1dd4b4129b040bea23a894266227080|" -#~ msgstr "|e1dd4b4129b040bea23a894266227080|" +#~ msgid "Clone the flower repository." +#~ msgstr "**叉花仓库**" -#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" -#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." 
+#~ msgstr "" +#~ "请阅读 :doc:`Run Flower using Docker " +#~ "` " +#~ "的第一节,其中更详细地介绍了这一步骤。" -#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" -#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgid "``22.04``" +#~ msgstr "``1.0.0rc1``" -#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" -#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgid "``23.0.1``" +#~ msgstr "``1.0.0rc1``" -#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" -#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" -#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" -#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" -#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" -#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "启动服务器" -#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" -#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "" -#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" -#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" -#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" -#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "官方 Ubuntu Docker 映像的版本。" -#~ msgid "|88002bbce1094ba1a83c9151df18f707|" -#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." 
+#~ msgstr "" -#~ msgid "|391766aee87c482c834c93f7c22225e2|" -#~ msgstr "|391766aee87c482c834c93f7c22225e2|" +#~ msgid "**Via the UI**" +#~ msgstr "**审查 PR**" -#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" -#~ msgstr "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" -#~ msgid "|a23d9638f96342ef9d25209951e2d564|" -#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" -#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" -#~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.6.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgid "**Via the GitHub CI**" #~ msgstr "" -#~ "将``!pip install -q 'flwr[simulation]' torch" -#~ " torchvision matplotlib``更改为``!pip install -q " -#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " -#~ "torch torchvision matplotlib``" #~ msgid "" -#~ "All that's left to do it to " -#~ "define a function that loads both " -#~ "model and data, creates a " -#~ ":code:`CifarClient`, and starts this client." -#~ " You load your data and model " -#~ "by using :code:`cifar.py`. Start " -#~ ":code:`CifarClient` with the function " -#~ ":code:`fl.client.start_numpy_client()` by pointing " -#~ "it at the same IP address we " -#~ "used in :code:`server.py`:" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." 
#~ msgstr "" -#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" -#~ " :code:`cifar.py` 加载数据和模型。使用函数 " -#~ ":code:`fl.client.start_numpy_client()` 启动 " -#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " -#~ "IP 地址:" #~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_numpy_client" -#~ " `_)" -#~ " in the sense that they can be" -#~ " configure by creating a class " -#~ "inheriting, for example, from " -#~ "`flwr.client.NumPyClient `_ and therefore " -#~ "behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." #~ msgstr "" -#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " -#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " -#~ "`_\" " -#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " -#~ "管理的客户端还包括:" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "实例: PyTorch 和 MNIST 的演练" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "示例: JAX - 运行联邦式 JAX" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." 
+#~ msgstr "" +#~ "开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 " +#~ "`Docker Hub `_" +#~ " 上找到这些镜像。" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" -#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" - -#~ msgid "Ready... Set... Train!" -#~ msgstr "准备...设置...训练!" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" +#~ "如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在" +#~ " `_ 找到安装说明。" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." 
#~ msgstr "" -#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " -#~ "PyTorch 的 `Basic MNIST Example " -#~ "`_。您会发现用 " -#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" -#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" +#~ "在 Linux 上,Docker 命令需要 ``sudo`` " +#~ "权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 `安装后步骤" +#~ " `_进行操作。" #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" +#~ "为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " +#~ "映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" -#~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." -#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" +#~ msgid "Quickstart" +#~ msgstr "快速入门 JAX" -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "现在,让我们看看里面到底发生了什么。" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "如果您想试用 Flower,可以使用以下命令:" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" -#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. 
In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" +#~ "该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的" +#~ " Docker 镜像。标签包含使用 Flower、Python 和 Ubuntu" +#~ " 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和" +#~ " Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" -#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" +#~ "``-p :`` 标记会告诉 Docker 将主机的端口" +#~ " ``9091``/``9092`` 映射到容器的端口 ``9091``/`9092``,这样你就可以在" +#~ " ``http://localhost:9091`` 上访问 Driver API,在 " +#~ "``http://localhost:9092`` 上访问 Fleet " +#~ "API。最后,标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " +#~ "``--insecure`` 。" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." -#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. 
We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" +#~ "不安全 \"标志启用不安全通信(使用 HTTP,而非 " +#~ "HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " +#~ "`_。" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." -#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" -#~ msgid "**sever_address**: String that identifies IP and port of the server." -#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "在主机系统上挂载卷以存储状态" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." 
#~ msgstr "" -#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " -#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" -#~ " 数据集上有独立的数据。" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." 
#~ msgstr "" -#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" -#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " -#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " -#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" #~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." #~ msgstr "" -#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -#~ ":code:`fl.client.start_client`。" -#~ msgid "A Closer Look" -#~ msgstr "仔细看一下" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." 
#~ msgstr "" -#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" -#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" +#~ "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 " +#~ "ClientApp 进行 docker 化。" + +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" -#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#~ msgid "Let's assume the following project layout:" +#~ msgstr "假设项目布局如下" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." #~ msgstr "" -#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " -#~ "下找到,现复制如下。它与 `Basic MNIST Example " -#~ "`_中的网络相同。" +#~ "首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 " +#~ "``requirements.txt`` 文件。在该文件中,我们列出了 ClientApp " +#~ "需要的所有依赖项。" #~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." 
#~ msgstr "" -#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" +#~ "请注意,`flwr `__ " +#~ "已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." #~ msgstr "" -#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " -#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " -#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" +#~ "接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch``" +#~ " 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为" +#~ " ``Dockerfile.supernode`` 的新文件。" #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" -#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. 
The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." #~ msgstr "" -#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " -#~ ":code:`evaluate` " -#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" +#~ "在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 " +#~ "SuperNode 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " +#~ "文件复制到映像中并运行 ``pip install`` 来安装 ClientApp " +#~ "依赖项。最后两行,我们将 ``client.py`` 模块复制到映像中,并将入口点设置为 " +#~ "``flower-client-app``,参数为 ``client:app``。参数是将在 " +#~ "ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" -#~ msgid "Give It a Try" -#~ msgstr "试试看" +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "启动服务器" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." 
#~ msgstr "" -#~ "通过上面的快速入门代码描述,你将对 Flower " -#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" -#~ " Flower 的经验:" +#~ "我们将图像命名为 ``flwr_supernode``,标签为 " +#~ "``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." -#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "启动服务器" -#~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" -#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" -#~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" -#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "让我们来分析一下这条命令的各个部分:" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" -#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "`-rm``: 该选项指定容器停止时应自动移除。" -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "不安全\": 该选项启用不安全通信。" -#~ msgid "Differential privacy" -#~ msgstr "差别隐私" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." 
+#~ msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." #~ msgstr "" -#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " -#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -#~ "框架中定义的训练模式中。" +#~ "要测试在本地运行 Flower,可以创建一个 \"桥接网络 " +#~ "`__\",使用\"--网络 \"参数并传递 Docker " +#~ "网络的名称,以运行超级节点。" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." -#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" #~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." -#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." 
+#~ msgstr "" +#~ "与 SuperNode 映像类似,ServerApp Docker 映像也预装了 " +#~ "Flower 版本,可作为构建自己的 ServerApp 映像的基础。" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." -#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "我们将使用与 \"Flower SuperNode \"部分相同的 " +#~ "\"quickstart-pytorch \"示例。如果您还没有这样做,请在继续之前遵循 " +#~ "\"SuperNode 先决条件\"。" + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "创建 ServerApp Dockerfile" + +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用" +#~ " ``quickstart-pytorch`` 示例,请在 ``examples" +#~ "/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.serverapp``" +#~ " 的新文件。" #~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." -#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." 
-#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" +#~ "在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 " +#~ "ServerApp 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " +#~ "模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 " +#~ "``server:app``。参数是将在 ServerApp 容器内运行的 ServerApp " +#~ "的对象引用(``<模块>:<属性>``)。" -#~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." -#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "启动服务器" -#~ msgid "Simplifying Assumptions" -#~ msgstr "简化假设" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "启动服务器" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." 
-#~ msgstr "" -#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " -#~ ":math:`(\\epsilon,\\delta)` 。" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" #~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." -#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." -#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" +#~ "要测试在本地运行 Flower,可以创建一个 ``bridge network " +#~ "`___,使用 ``--network`` 参数并传递 Docker " +#~ "网络的名称,以运行 ServerApps。" #~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." -#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. 
To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." -#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." -#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" -#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "可定制的噪声注入" +#~ msgid "Run with root user privileges" +#~ msgstr "" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." 
-#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." -#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" -#~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgid "**Run a container with root user privileges**" #~ msgstr "" -#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" -#~ msgid "Wrapper-based approach" -#~ msgstr "基于封装的方法" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" + +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" + +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" + +#~ msgid "d defaults to None." +#~ msgstr "d 默认为 \"无\"。" + +#~ msgid "Update R from dict/iterable E and F." 
+#~ msgstr "根据二进制/可迭代 E 和 F 更新 R。" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" -#~ "在现有工作负载中引入 DP " -#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -#~ ":code:`Strategy` 和 :code:`NumPyClient` " -#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" -#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. 
A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgstr "" -#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -#~ ":code:`__init__()` " -#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -#~ " :code:`DPFedAvgFixed` 和 " -#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." 
+#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" -#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" + +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" + +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" -#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " -#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " -#~ "下键入)进行扩充。并且,如果 " -#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " -#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " -#~ "所返回的结果进行后处理。" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. 
On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" -#~ ":code:`aggregate_fit()`: " -#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " -#~ "1,强制以不加权的方式平均更新。此外,如果 " -#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" -#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " -#~ "之前,对参数进行*预*处理。" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." -#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" + +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" + +#~ msgid "An identifier telling which data partition a ClientApp should use." 
+#~ msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" + +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" + +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" + +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" +#~ msgstr "run\\_fleet\\_api" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ " +#~ "配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" + +#~ msgid "key shares." +#~ msgstr "关键股份。" #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." 
+#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " -#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg " +#~ "配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" -#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " -#~ "返回的 config 字典,并在其中添加键-值对 " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" +#~ "字典,例如 {\"\": , \"\": " +#~ "} 来配置后端。 中支持的值是 " +#~ "`flwr.common.typing.ConfigsRecordValues`中包含的值。" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." -#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." 
+#~ msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." -#~ msgstr "" -#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " -#~ ":code:`fit()` " -#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." -#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" + +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. 
You can do this by running" +#~ " :" +#~ msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" -#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" +#~ "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的《Deep Learning with PyTorch " +#~ "`_》。" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" + +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "此外,我们还在 PyTorch 中定义了设备分配:" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. 
The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " -#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" -#~ " 值,可以使用下面的脚本。" +#~ "我们使用 PyTorch 来加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " +#~ ":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" -#~ msgid "Flower driver SDK." -#~ msgstr "Flower 服务器。" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" -#~ msgid "driver" -#~ msgstr "服务器" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" -#~ msgid "Get task results." -#~ msgstr "汇总训练结果。" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" -#~ msgid "Request for run ID." -#~ msgstr "Flower 基线申请" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" -#~ msgid "Get client IDs." -#~ msgstr "返回客户端(本身)。" +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" #~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. 
All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" -#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -#~ "`_。" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " PyTorch 时,它使 :code:`Client` 接口的实现变得更容易。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "快速入门 TensorFlow/Keras" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "可以通过以下方式实现:" -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "传统示例 (`flwr_example`)" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "pytorch` 中找到。" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." 
#~ msgstr "" -#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " -#~ "`examples `_" -#~ " 中提供。" +#~ "代码:`self.bst`用于保存在各轮中保持一致的 Booster " +#~ "对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" -#~ msgid "Extra Dependencies" -#~ msgstr "额外依赖" +#~ msgid "Implementing a Flower client" +#~ msgstr "实现 Flower 客户端" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" -#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " -#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" +#~ "为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` " +#~ "的子类,并实现了 ``get_parameters``、``fit`` 和``evaluate`` " +#~ "三个方法:" -#~ msgid "For PyTorch examples::" -#~ msgstr "PyTorch 示例::" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." 
+#~ msgstr "" +#~ "函数 ``start_simulation`` 接受许多参数,其中包括用于创建 " +#~ "``FlowerClient`` 实例的 " +#~ "``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" +#~ " (FedAvg)。" -#~ msgid "For TensorFlow examples::" -#~ msgstr "TensorFlow 示例::" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "PyTorch 和 TensorFlow 示例::" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "" -#~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" #~ msgstr "" -#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -#~ ":code:`[tool.poems.extras]`)。" -#~ msgid "PyTorch Examples" -#~ msgstr "PyTorch 示例" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "" -#~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." -#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "" -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "CIFAR-10 图像分类" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "" -#~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." 
+#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ "CIFAR-10 和 CIFAR-100 " -#~ "``_ 是流行的 RGB" -#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " -#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" -#~ msgid "First, start a Flower server:" -#~ msgstr "首先,启动 Flower 服务器:" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" -#~ msgstr "然后,在新的终端窗口中启动两个客户端:" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 图像分类" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "" -#~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." 
+#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ "ImageNet-2012 `_ " -#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " -#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "run\\_client\\_app" +#~ msgstr "客户端" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" -#~ msgid "TensorFlow Examples" -#~ msgstr "TensorFlow 示例" +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." -#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Fashion-MNIST 图像分类" +#~ msgid "the string key as the query for the layout." +#~ msgstr "" -#~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ msgid "Corresponding layout based on the query." 
#~ msgstr "" -#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" -#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "run\\_server\\_app" +#~ msgstr "服务器" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "多节点 Flower 模拟" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "基本图像标签" +#~ msgid "The total number of clients in this simulation." +#~ msgstr "需要等待的客户数量。" -#~ msgid "The image tag of the base image." -#~ msgstr "基础图像的图像标记。" +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. 
Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" -#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " -#~ ":ref:`changelogentry` 附录中的一些示例和细节。" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "打开 PR(如上图所示)" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" +#~ "抽象基类 `flwr.server.strategy.Strategy` " +#~ "的实现。如果没有提供策略,`start_server` 将使用 " +#~ "`flwr.server.strategy.FedAvg`。" -#~ msgid "How to write a good PR title" -#~ msgstr "如何撰写好的公关标题" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.strategy.Strategy` " +#~ "的实现。如果没有提供策略,`start_server` 将使用 " +#~ "`flwr.server.strategy.FedAvg`。" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. 
Here's a guide to help " -#~ "you write a good GitHub PR title:" -#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_server` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." #~ msgstr "" -#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " -#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " -#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" -#~ " 遵守语法规则,以确保清晰。" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" -#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. 
If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" -#~ msgid "Implement Algorithm" -#~ msgstr "执行算法" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "在代码库中添加 my_new_file.py" +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" -#~ msgid "Improve code in module" -#~ msgstr "改进模块中的代码" +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" -#~ msgid "Change SomeModule" -#~ msgstr "更改 SomeModule" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" -#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. 
For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "移除不必要的 XGBoost 依赖性" +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "**hist** -- 包含训练和评估指标的对象。" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "删除 FedAvg 子类化策略中的多余属性" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" #~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" +#~ "请参阅 `完整代码示例 " +#~ "`_了解更多信息。" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" -#~ msgstr "添加新的惊人库,用于改进模拟引擎" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" -#~ msgid "Changelog entry" -#~ msgstr "更新日志" +#~ msgid "Dependencies" +#~ msgstr "依赖关系" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." -#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. 
This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" +#~ "要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 " +#~ ":code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过" +#~ " :code:`pip` 来完成:" -#~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" -#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "标准Hugging Face工作流程" -#~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." -#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" +#~ msgid "Handling the data" +#~ msgstr "处理数据" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." -#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" +#~ "为了获取 IMDB 数据集,我们将使用 Hugging Face 的 " +#~ ":code:`datasets` 库。然后,我们需要对数据进行标记化,并创建 :code:`PyTorch` " +#~ "数据加载器,这些都将在 :code:`load_data` 函数中完成:" -#~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." 
-#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" +#~ msgid "Training and testing the model" +#~ msgstr "训练和测试模型" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" -#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" +#~ "有了创建 trainloader 和 testloader " +#~ "的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` 训练或测试循环都非常相似:" -#~ msgid " is for classifying a PR as a general improvement." -#~ msgstr " 用于将 PR 划分为一般改进。" +#~ msgid "Creating the model itself" +#~ msgstr "创建模型本身" -#~ msgid " is to not add the PR to the changelog" -#~ msgstr "表示不将 PR 添加到更新日志中" +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" +#~ "要创建模型本身,我们只需使用 Hugging Face 的 " +#~ ":code:`AutoModelForSequenceClassification` 加载预训练的 " +#~ "distillBERT 模型:" -#~ msgid " is to add a general baselines change to the PR" -#~ msgstr " 是指在 PR 中添加一般基线更改" +#~ msgid "Creating the IMDBClient" +#~ msgstr "创建 IMDBClient" -#~ msgid " is to add a general examples change to the PR" -#~ msgstr " 是在 PR 中添加对一般示例的修改" +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). 
This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " +#~ ":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 " +#~ ":code:`PyTorch` 模型:" -#~ msgid " is to add a general sdk change to the PR" -#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" +#~ ":code:`get_parameters` " +#~ "函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" -#~ msgid " is to add a general simulations change to the PR" -#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" +#~ msgid "Starting the server" +#~ msgstr "启动服务器" -#~ msgid "Note that only one token should be used." -#~ msgstr "请注意,只能使用一个标记。" +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " +#~ ":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " +#~ ":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. 
We will break down what " -#~ "each possibility does:" -#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "使用 :code:`weighted_average` " +#~ "函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" + +#~ msgid "Putting everything together" +#~ msgstr "把所有东西放在一起" + +#~ msgid "We can now start client instances using:" +#~ msgstr "现在我们可以使用:" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "他们就能连接到服务器,开始联邦训练。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" +#~ "如果您想查看所有内容,请查看完整的代码示例: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." -#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." 
+#~ msgstr "" +#~ "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower " +#~ "联合Hugging Face的工作流程。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" +#~ "请参阅 `完整代码示例 " +#~ "`_ 了解更多信息。" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" 
+ +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "在导入 Flower 之前,我们必须先安装它:" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." -#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "示例: MXNet - 运行联邦式 MXNet" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." 
#~ msgstr "" -#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" -#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " -#~ "`PyTorch - 从集中式到联邦式 " -#~ "`_ 教程类似的示例结构。MXNet" -#~ " 和 PyTorch 非常相似,参考 `此处 " -#~ "`_对 MXNet " -#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " -#~ "`" -#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" +#~ "我们使用 TF 的 Keras 实用程序加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` 会下载 " +#~ "CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" -#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" - -#~ msgid "MNIST Training with MXNet" -#~ msgstr "使用 MXNet 进行 MNIST 训练" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." 
#~ msgstr "" -#~ "首先,我们将简要介绍基于 :code:`Sequential` " -#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " -#~ "`_。" +#~ "Flower 服务器通过一个名为 :code:`Client` " +#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " +#~ ":code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" -#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " -#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " -#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " Keras 时,该类可以更轻松地实现 :code:`Client` " +#~ "接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." -#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "That's it for the client. 
We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " -#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " +#~ ":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" +#~ " :code:`server_address`。" -#~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." -#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" +#~ msgid "Each client will have its own dataset." +#~ msgstr "每个客户都有自己的数据集。" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." 
-#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " -#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "tensorflow/client.py` 中找到。" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ "到目前为止,如果你以前使用过 MXNet(甚至 " -#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet 结合 Flower" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. 
Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " -#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " -#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" -#~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. 
Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " -#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " -#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " -#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" -#~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ "实现 Flower *client*基本上意味着要实现 " -#~ ":code:`flwr.client.Client` 或 " -#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " -#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" -#~ " 或 MXNet),:code:`NumPyClient` 比 " -#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " -#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" -#~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." 
+#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " -#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" -#~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " -#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " -#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" -#~ " :code:`NumPyClient` 子类告知 Flower " -#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " -#~ ":code:`Sequential` 模型。" -#~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" -#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" -#~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. 
You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" #~ msgstr "" -#~ "此示例的完整源代码在:\"MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" -#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" -#~ msgid "with the following command sequence:" -#~ msgstr "使用以下命令序列:" +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" -#~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." -#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" -#~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." -#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" -#~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." 
+#~ msgid "|f403fcd69e4e44409627e748b404c086|" #~ msgstr "" -#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " -#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" -#~ msgid "Flower server" -#~ msgstr "Flower 服务器" +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" -#~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgid "|82324b9af72a4582a81839d55caab767|" #~ msgstr "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "启动基于 Ray 的Flower模拟服务器。" +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" -#~ "Flower 1.0: ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" -#~ msgid "`Driver` class provides an interface to the Driver API." -#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." 
-#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" -#~ msgid ":py:obj:`close `\\ \\(\\)" -#~ msgstr "server.strategy.Strategy" +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." -#~ msgstr "如果已连接,请断开与超级链接的连接。" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" -#~ msgid "start\\_driver" -#~ msgstr "启动客户端" +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" -#~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" -#~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." -#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" -#~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." 
+#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ "抽象基类 `flwr.server.ClientManager` " -#~ "的实现。如果没有提供实现,`start_server` 将使用 " -#~ "`flwr.server.client_manager.SimpleClientManager`。" -#~ msgid "The Driver object to use." -#~ msgstr "要使用的驱动程序对象。" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" -#~ msgstr "启动不安全的服务器:" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" -#~ msgstr "启动支持 SSL 的服务器:" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" -#~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "运行模拟" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" -#~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." -#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" -#~ msgid "Quickstart MXNet" -#~ msgstr "快速入门 MXNet" +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" -#~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." 
-#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " -#~ "MXNet 的 `手写数字识别教程 " -#~ "`_\"。" -#~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" -#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" -#~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" #~ msgstr "" -#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" -#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." -#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" -#~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. 
We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." -#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" -#~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." -#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" -#~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" #~ msgstr "" -#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " -#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " -#~ "客户端。" -#~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." 
-#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" #~ msgstr "" -#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" -#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " -#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " -#~ "是可选的):" -#~ msgid "They can be implemented in the following way:" -#~ msgstr "它们可以通过以下方式实现:" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgstr "" -#~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" -#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgstr "" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." 
+#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" -#~ " 并调用 :code:`fl.client.start_client()` 或 " -#~ ":code:`fl.client.start_numpy_client()`。字符串 " -#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" -#~ " " -#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -#~ " :code:`server_address`。" -#~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" -#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgstr "" -#~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." 
+#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -#~ "`_ 可在 :code:`examples/quickstart-" -#~ "mxnet` 中找到。" -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "使用 OpenML 加载 MNIST 数据集" +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "对数据及其标签进行洗牌" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "将数据集分割成多个分区" +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ "我们从 `OpenML `_ 中加载 " -#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " -#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " -#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "Let's get stated!" -#~ msgstr "让我们开始吧!" +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "" +#~ "Include SecAgg, SecAgg+, and LightSecAgg " +#~ "protocol. The LightSecAgg protocol has " +#~ "not been implemented yet, so its " +#~ "diagram and abstraction may not be " +#~ "accurate in practice. 
The SecAgg " +#~ "protocol can be considered as a " +#~ "special case of the SecAgg+ protocol." #~ msgstr "" +#~ "包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg" +#~ " 协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ " +#~ "协议的特例。" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "The ``SecAgg+`` abstraction" +#~ msgstr "代码:`SecAgg+` 抽象" + +#~ msgid "" +#~ "In this implementation, each client will" +#~ " be assigned with a unique index " +#~ "(int) for secure aggregation, and thus" +#~ " many python dictionaries used have " +#~ "keys of int type rather than " +#~ "ClientProxy type." #~ msgstr "" +#~ "在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是" +#~ " int 类型,而不是 ClientProxy 类型。" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "" +#~ "The Flower server will execute and " +#~ "process received results in the " +#~ "following order:" +#~ msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" + +#~ msgid "The ``LightSecAgg`` abstraction" +#~ msgstr "代码:`LightSecAgg` 抽象" + +#~ msgid "Types" +#~ msgstr "类型" + +#~ msgid "" +#~ "Docker Compose is `installed " +#~ "`_." #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "Run the example:" +#~ msgstr "将示例联邦化" + +#~ msgid "Follow the logs of the SuperExec service:" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "Only runs on AMD64." #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "" +#~ "Use the method that works best for" +#~ " you to copy the ``server`` " +#~ "directory, the certificates, and your " +#~ "Flower project to the remote machine." #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "" +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``server`` Docker Compose files." #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "" +#~ "The Path of the ``PROJECT_DIR`` should" +#~ " be relative to the location of " +#~ "the ``client`` Docker Compose files." 
#~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "" +#~ "The Path of the ``root-certificates``" +#~ " should be relative to the location" +#~ " of the ``pyproject.toml`` file." #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "To run the project, execute:" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "Run the ``quickstart-docker`` project by executing the command:" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "Follow the SuperExec logs to track the execution of the run:" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "Execute the command to run the quickstart example:" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "Monitor the SuperExec logs and wait for the summary to appear:" #~ msgstr "" +#~ msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#~ msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" + +#~ msgid "Centralized Training" +#~ msgstr "集中式训练" + #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. 
The only thing" +#~ " to do is modifying the file " +#~ "called ``cifar.py``, revised part is " +#~ "shown below:" #~ msgstr "" -#~ "目前,Flower " -#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" -#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" -#~ " Flower 服务器。" +#~ "所有文件均根据 `示例: PyTorch -从集中式到联邦式 " +#~ "`_。唯一要做的就是修改名为 " +#~ ":code:`cifar.py` 的文件,修改部分如下所示:" -#~ msgid "``3.11``" -#~ msgstr "``1.0.0rc1``" +#~ msgid "" +#~ "The model architecture defined in class" +#~ " Net() is added with Batch " +#~ "Normalization layers accordingly." +#~ msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" -#~ msgid "Defaults to ``22.04``." -#~ msgstr "默认为 ``22.04``。" +#~ msgid "You can now run your machine learning workload:" +#~ msgstr "现在,您可以运行您的机器学习工作了:" -#~ msgid "Building the SuperLink image" -#~ msgstr "启动服务器" +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the system consists of one " +#~ "server and two clients." +#~ msgstr "" +#~ "到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 " +#~ "FedBN 中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" + +#~ msgid "Federated Training" +#~ msgstr "联邦培训" + +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only ``get_parameters`` " +#~ "and ``set_parameters`` function in " +#~ "``client.py`` needed to revise. If not," +#~ " please read the :doc:`Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`. first." +#~ msgstr "" +#~ "如果你读过 `示例: PyTorch - 从集中式到联邦式 " +#~ "`_,下面的部分就很容易理解了,只需要修改 " +#~ ":code:`get_parameters` 和 :code:`set_parameters` 中的" +#~ " :code:`client.py` 函数。如果没有,请阅读 `示例: PyTorch " +#~ "- 从集中式到联邦式 `_。" + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. 
In FedBN, " +#~ "``server.py`` keeps unchanged, we can " +#~ "start the server directly." +#~ msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" + +#~ msgid "Now, you can now open two additional terminal windows and run" +#~ msgstr "现在,您可以打开另外两个终端窗口并运行程序" -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "默认为 ``flwr/server``。" +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning with FedBN strategy across two" +#~ " clients. Congratulations!" +#~ msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" -#~ msgid "The Python version of the base image." -#~ msgstr "基础镜像的存储库名称。" +#~ msgid "Example: PyTorch - From Centralized To Federated" +#~ msgstr "实例: PyTorch - 从集中式到联邦式" -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "默认为 ``22.04``。" +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload. We are using PyTorch to" +#~ " train a Convolutional Neural Network " +#~ "on the CIFAR-10 dataset. First, we " +#~ "introduce this machine learning task " +#~ "with a centralized training approach " +#~ "based on the `Deep Learning with " +#~ "PyTorch " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." +#~ msgstr "" +#~ "本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch " +#~ "在 CIFAR-10 数据集上训练一个卷积神经网络。首先,我们基于 \"Deep " +#~ "Learning with PyTorch " +#~ "`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized CNN training code." 
+#~ " If you want a more in-depth" +#~ " explanation of what's going on then" +#~ " have a look at the official " +#~ "`PyTorch tutorial " +#~ "`_." +#~ msgstr "" +#~ "我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch " +#~ "官方教程`PyTorch tutorial " +#~ "`_。" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "默认为 ``flwr/server``。" +#~ msgid "" +#~ "Let's create a new file called " +#~ "``cifar.py`` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as ``torch`` and" +#~ " ``torchvision``) need to be imported. " +#~ "You can see that we do not " +#~ "import any package for federated " +#~ "learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." +#~ msgstr "" +#~ "让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " +#~ "传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " +#~ ":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." -#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in ``class Net()``." +#~ msgstr "" +#~ "如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 " +#~ ":code:`class Net()` 中定义。" -#~ msgid "Creating New Messages" -#~ msgstr "创建新信息" +#~ msgid "" +#~ "The ``load_data()`` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ "``transform`` normalized the data after " +#~ "loading." 
+#~ msgstr "" +#~ ":code:`load_data()` 函数加载 CIFAR-10 " +#~ "训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." -#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" +#~ "We now need to define the training" +#~ " (function ``train()``) which loops over" +#~ " the training set, measures the loss," +#~ " backpropagates it, and then takes " +#~ "one optimizer step for each batch " +#~ "of training examples." +#~ msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." -#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." +#~ "The evaluation of the model is " +#~ "defined in the function ``test()``. The" +#~ " function loops over all test samples" +#~ " and measures the loss of the " +#~ "model based on the test dataset." +#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" -#~ msgid "Server's side:" -#~ msgstr "在服务器端:" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our CNN on CIFAR-10." +#~ msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" -#~ msgid "Client's side:" -#~ msgstr "在客户端:" +#~ msgid "" +#~ "So far, this should all look " +#~ "fairly familiar if you've used PyTorch" +#~ " before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." 
+#~ msgstr "" +#~ "到目前为止,如果你以前用过 " +#~ "PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" + +#~ msgid "" +#~ "The simple machine learning project " +#~ "discussed in the previous section trains" +#~ " the model on a single dataset " +#~ "(CIFAR-10), we call this centralized " +#~ "learning. This concept of centralized " +#~ "learning, as shown in the previous " +#~ "section, is probably known to most " +#~ "of you, and many of you have " +#~ "used it previously. Normally, if you'd" +#~ " want to run machine learning " +#~ "workloads in a federated fashion, then" +#~ " you'd have to change most of " +#~ "your code and set everything up " +#~ "from scratch. This can be a " +#~ "considerable effort." +#~ msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" + +#~ msgid "" +#~ "However, with Flower you can evolve " +#~ "your pre-existing code into a " +#~ "federated learning setup without the " +#~ "need for a major rewrite." +#~ msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" + +#~ msgid "" +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in ``cifar.py``" +#~ " for the *clients* that are connected" +#~ " to the *server*. The *server* sends" +#~ " model parameters to the clients. The" +#~ " *clients* run the training and " +#~ "update the parameters. The updated " +#~ "parameters are sent back to the " +#~ "*server* which averages all received " +#~ "parameter updates. This describes one " +#~ "round of the federated learning process" +#~ " and we repeat this for multiple " +#~ "rounds." +#~ msgstr "" +#~ "这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +#~ ":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ "``server.py`` first. 
The *server* needs " +#~ "to import the Flower package ``flwr``." +#~ " Next, we use the ``start_server`` " +#~ "function to start a server and " +#~ "tell it to perform three rounds of" +#~ " federated learning." +#~ msgstr "" +#~ "我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 " +#~ "Flower 软件包 :code:`flwr`。接下来,我们使用 " +#~ ":code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" + +#~ msgid "We can already start the *server*:" +#~ msgstr "我们已经可以启动*服务器*了:" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" -#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined centralized " +#~ "training in ``cifar.py``. Our *client* " +#~ "needs to import ``flwr``, but also " +#~ "``torch`` to update the parameters on" +#~ " our PyTorch model:" +#~ msgstr "" +#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " +#~ "逻辑,并以之前在 :code:`cifar.py` 中定义的集中式训练为基础。我们的 *client*" +#~ " 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " +#~ "PyTorch 模型的参数:" -#~ msgid "Message Types for Protocol Buffers" -#~ msgstr "协议缓冲区的信息类型" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``CifarClient``. ``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. 
``CifarClient``" +#~ " needs to implement four methods, two" +#~ " methods for getting/setting model " +#~ "parameters, one method for training the" +#~ " model, and one method for testing" +#~ " the model:" +#~ msgstr "" +#~ "实现 Flower *client*基本上意味着实现 " +#~ ":code:`flwr.client.Client` 或 " +#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " +#~ ":code:`CifarClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" +#~ " 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " +#~ ":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " +#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#~ msgid "``set_parameters``" +#~ msgstr ":code:`set_parameters`" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." -#~ msgstr "" -#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " -#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" -#~ " `_。" +#~ "set the model parameters on the " +#~ "local model that are received from " +#~ "the server" +#~ msgstr "在本地模型上设置从服务器接收的模型参数" -#~ msgid "Within the :code:`ServerMessage` block:" -#~ msgstr "在 :code:`ServerMessage` 代码块中:" +#~ msgid "" +#~ "loop over the list of model " +#~ "parameters received as NumPy ``ndarray``'s " +#~ "(think list of neural network layers)" +#~ msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" -#~ msgid "Within the ClientMessage block:" -#~ msgstr "在 ClientMessage 代码块中:" +#~ msgid "``get_parameters``" +#~ msgstr ":code:`get_parameters`" #~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." 
-#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ "``ndarray``'s (which is what " +#~ "``flwr.client.NumPyClient`` expects)" +#~ msgstr "" +#~ "获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " +#~ ":code:`flwr.client.NumPyClient`所匹配的格式)" -#~ msgid "Once that is done, we will compile the file with:" -#~ msgstr "完成后,我们将使用:" +#~ msgid "``fit``" +#~ msgstr "" -#~ msgid "If it compiles successfully, you should see the following message:" -#~ msgstr "如果编译成功,你应该会看到以下信息:" +#~ msgid "" +#~ "update the parameters of the local " +#~ "model with the parameters received from" +#~ " the server" +#~ msgstr "用从服务器接收到的参数更新本地模型的参数" -#~ msgid "Serialization and Deserialization Functions" -#~ msgstr "序列化和反序列化函数" +#~ msgid "train the model on the local training set" +#~ msgstr "在本地训练集上训练模型" + +#~ msgid "get the updated local model weights and return them to the server" +#~ msgstr "获取更新后的本地模型参数并发送回服务器" + +#~ msgid "evaluate the updated model on the local test set" +#~ msgstr "在本地测试集上评估更新后的模型" + +#~ msgid "return the local loss and accuracy to the server" +#~ msgstr "向服务器返回本地损失值和精确度" #~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``test()`` " +#~ "previously defined in ``cifar.py``. So " +#~ "what we really do here is we " +#~ "tell Flower through our ``NumPyClient`` " +#~ "subclass which of our already defined" +#~ " functions to call for training and" +#~ " evaluation. We included type annotations" +#~ " to give you a better understanding" +#~ " of the data types that get " +#~ "passed around." 
#~ msgstr "" -#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " -#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" -#~ " :code:`serde.py` 中添加这些函数。" +#~ "这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 " +#~ ":code:`evaluate` 使用了之前在 :code:`cifar.py` 中定义的函数 " +#~ ":code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " +#~ ":code:`NumPyClient` 子类告知 Flower " +#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#~ msgid "The four functions:" -#~ msgstr "四种函数:" +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ "``CifarClient``, and starts this client. " +#~ "You load your data and model by" +#~ " using ``cifar.py``. Start ``CifarClient`` " +#~ "with the function ``fl.client.start_client()`` " +#~ "by pointing it at the same IP " +#~ "address we used in ``server.py``:" +#~ msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" -#~ msgid "Sending the Message from the Server" -#~ msgstr "从服务器发送信息" +#~ msgid "And that's it. You can now open two additional terminal windows and run" +#~ msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" #~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" -#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" +#~ "in each window (make sure that the" +#~ " server is running before you do " +#~ "so) and see your (previously " +#~ "centralized) PyTorch project run federated " +#~ "learning across two clients. Congratulations!" +#~ msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" -#~ msgid "Receiving the Message by the Client" -#~ msgstr "由客户端接收信息" +#~ msgid "" +#~ "The full source code for this " +#~ "example: `PyTorch: From Centralized To " +#~ "Federated (Code) " +#~ "`_. 
Our " +#~ "example is, of course, somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using different subsets of CIFAR-10 " +#~ "on each client? How about adding " +#~ "more clients?" +#~ msgstr "" +#~ "本示例的完整源代码为:`PyTorch: 从集中式到联合式 " +#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" +#~ " CIFAR-10 子集会如何?增加更多客户端会如何?" #~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ "To help you start and manage all" +#~ " of the concurrently executing training " +#~ "runs, Flower offers one additional " +#~ "long-running server-side service called " +#~ "**SuperExec**. When you type ``flwr " +#~ "run`` to start a new training run," +#~ " the ``flwr`` CLI bundles your local" +#~ " project (mainly your ``ServerApp`` and " +#~ "``ClientApp``) and sends it to the " +#~ "**SuperExec**. The **SuperExec** will then " +#~ "take care of starting and managing " +#~ "your ``ServerApp``, which in turn " +#~ "selects SuperNodes to execute your " +#~ "``ClientApp``." #~ msgstr "" -#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" -#~ " :code:`example_response` 函数。记住使用 serde 函数!" -#~ msgid "Within the handle function:" -#~ msgstr "在句柄函数内:" +#~ msgid "" +#~ "This architecture allows many users to" +#~ " (concurrently) run their projects on " +#~ "the same federation, simply by typing" +#~ " ``flwr run`` on their local " +#~ "developer machine." +#~ msgstr "" -#~ msgid "And add a new function:" -#~ msgstr "并增加一个新函数:" +#~ msgid "Flower Deployment Engine with SuperExec" +#~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" -#~ msgstr "希望您在运行程序时能得到预期的结果!" 
+#~ msgid "The SuperExec service for managing concurrent training runs in Flower." +#~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" -#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "FED Template" +#~ msgstr "FED 模板" -#~ msgid "Run Flower server (Driver API)." -#~ msgstr "flower-driver-api" +#~ msgid "Table of Contents" +#~ msgstr "目录" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" -#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "[Table of Contents](#table-of-contents)" +#~ msgstr "[目录](#table-of-contents)" -#~ msgid "Run Flower server (Fleet API)." -#~ msgstr "Flower 服务器。" +#~ msgid "[Summary](#summary)" +#~ msgstr "[总结](#summary)" -#~ msgid "Unreleased" -#~ msgstr "尚未发布" +#~ msgid "[Motivation](#motivation)" +#~ msgstr "[动机](#motivation)" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" -#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "[Goals](#goals)" +#~ msgstr "[目标](#goals)" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" -#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "[Non-Goals](#non-goals)" +#~ msgstr "[非目标](#non-goals)" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" -#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "[Proposal](#proposal)" +#~ msgstr "[计划](#proposal)" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" -#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "[Drawbacks](#drawbacks)" +#~ msgstr "[缺点](#drawbacks)" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" -#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "[Alternatives Considered](#alternatives-considered)" +#~ msgstr "[备选方案](#alternatives-considered)" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" -#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "[Appendix](#appendix)" +#~ msgstr "[附录](#appendix)" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" -#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "Summary" +#~ msgstr "总结" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" -#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" 
+#~ msgid "\\[TODO - sentence 1: summary of the problem\\]" +#~ msgstr "\\[TODO - 句子 1: 问题概括\\]" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" -#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "\\[TODO - sentence 2: summary of the solution\\]" +#~ msgstr "\\[TODO - 句子 2: 解决方案概括\\]" + +#~ msgid "Motivation" +#~ msgstr "动机" + +#~ msgid "\\[TODO\\]" +#~ msgstr "\\[TODO\\]" + +#~ msgid "Goals" +#~ msgstr "目标" + +#~ msgid "Non-Goals" +#~ msgstr "非目标" + +#~ msgid "Proposal" +#~ msgstr "提案" + +#~ msgid "Drawbacks" +#~ msgstr "缺点" + +#~ msgid "Alternatives Considered" +#~ msgstr "备选方案" + +#~ msgid "\\[Alternative 1\\]" +#~ msgstr "\\[备选 1\\]" + +#~ msgid "\\[Alternative 2\\]" +#~ msgstr "\\[备选 2\\]" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" -#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "Flower Enhancement Doc" +#~ msgstr "Flower 改善文档" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" -#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#~ msgstr "[增强文档模版](#enhancement-doc-template)" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" -#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "[Metadata](#metadata)" +#~ msgstr "[描述数据](#metadata)" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" -#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "[Workflow](#workflow)" +#~ msgstr "[工作流程](#workflow)" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" -#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "[GitHub Issues](#github-issues)" +#~ msgstr "[GitHub 问题](#github-issues)" -#~ msgid "Edge Client Engine" -#~ msgstr "边缘客户端引擎" +#~ msgid "[Google Docs](#google-docs)" +#~ msgstr "[谷歌文档](#google-docs)" -#~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with Edge Client Engine" -#~ msgstr "具有边缘客户端引擎的`Flower `核心架构" +#~ msgid "A Flower Enhancement is a standardized development process to" +#~ msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" -#~ msgid "Virtual Client Engine" -#~ msgstr "虚拟客户端引擎" 
+#~ msgid "provide a common structure for proposing larger changes" +#~ msgstr "为提出更大规模的改动提供一个共同的结构" -#~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with Virtual Client Engine" -#~ msgstr "具有虚拟客户端引擎的`Flower `核心架构" +#~ msgid "ensure that the motivation for a change is clear" +#~ msgstr "确保改动的动机明确" -#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" -#~ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" +#~ msgid "persist project information in a version control system" +#~ msgstr "将项目信息保存在版本控制系统中" -#~ msgid "" -#~ "`Flower `_ core framework " -#~ "architecture with both Virtual Client " -#~ "Engine and Edge Client Engine" -#~ msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" +#~ msgid "document the motivation for impactful user-facing changes" +#~ msgstr "记录面向用户的具有影响力的改动的动机" -#~ msgid "Clone the flower repository." -#~ msgstr "**叉花仓库**" +#~ msgid "reserve GitHub issues for tracking work in flight" +#~ msgstr "保留 GitHub 问题,用于跟踪进行中的工作" #~ msgid "" -#~ "Please follow the first section on " -#~ ":doc:`Run Flower using Docker ` which " -#~ "covers this step in more detail." 
-#~ msgstr "" -#~ "请阅读 :doc:`Run Flower using Docker " -#~ "` " -#~ "的第一节,其中更详细地介绍了这一步骤。" +#~ "ensure community participants can successfully" +#~ " drive changes to completion across " +#~ "one or more releases while stakeholders" +#~ " are adequately represented throughout the" +#~ " process" +#~ msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" -#~ msgid "``22.04``" -#~ msgstr "``1.0.0rc1``" +#~ msgid "Hence, an Enhancement Doc combines aspects of" +#~ msgstr "因此,\"增强文件\"将以下方面结合起来" -#~ msgid "``23.0.1``" -#~ msgstr "``1.0.0rc1``" +#~ msgid "a feature, and effort-tracking document" +#~ msgstr "一个功能和效力跟踪文档" -#~ msgid "``69.0.2``" -#~ msgstr "``1.0.0b0``" +#~ msgid "a product requirements document" +#~ msgstr "一个产品需要文档" -#~ msgid "``1.8.0``" -#~ msgstr "``1.0.0b0``" +#~ msgid "a design document" +#~ msgstr "一个设计文档" -#~ msgid "Building the SuperLink/SuperNode or ServerApp image" -#~ msgstr "启动服务器" +#~ msgid "" +#~ "into one file, which is created " +#~ "incrementally in collaboration with the " +#~ "community." +#~ msgstr "该文件是与社区合作逐步创建的。" -#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgid "" +#~ "For far-fetching changes or features " +#~ "proposed to Flower, an abstraction " +#~ "beyond a single GitHub issue or " +#~ "pull request is required to understand" +#~ " and communicate upcoming changes to " +#~ "the project." #~ msgstr "" +#~ "对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub " +#~ "问题或拉取请求(pull request)的抽象概念,以了解和沟通项目即将发生的变更。" #~ msgid "" -#~ "The following example creates a " -#~ "SuperLink/SuperNode or ServerApp image with" -#~ " the official Flower base image:" -#~ msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" - -#~ msgid "Trigger the CI for building the Docker images." -#~ msgstr "官方 Ubuntu Docker 映像的版本。" +#~ "The purpose of this process is to" +#~ " reduce the amount of \"tribal " +#~ "knowledge\" in our community. 
By moving" +#~ " decisions from Slack threads, video " +#~ "calls, and hallway conversations into a" +#~ " well-tracked artifact, this process " +#~ "aims to enhance communication and " +#~ "discoverability." +#~ msgstr "" +#~ "这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " +#~ "线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" #~ msgid "" -#~ "To trigger the workflow, a collaborator" -#~ " must create a ``workflow_dispatch`` event" -#~ " in the GitHub CI. This can be" -#~ " done either through the UI or " -#~ "via the GitHub CLI. The event " -#~ "requires only one input, the Flower " -#~ "version, to be released." -#~ msgstr "" +#~ "Roughly any larger, user-facing " +#~ "enhancement should follow the Enhancement " +#~ "process. If an enhancement would be " +#~ "described in either written or verbal" +#~ " communication to anyone besides the " +#~ "author or developer, then consider " +#~ "creating an Enhancement Doc." +#~ msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" -#~ msgid "**Via the UI**" -#~ msgstr "**审查 PR**" +#~ msgid "" +#~ "Similarly, any technical effort (refactoring," +#~ " major architectural change) that will " +#~ "impact a large section of the " +#~ "development community should also be " +#~ "communicated widely. The Enhancement process" +#~ " is suited for this even if it" +#~ " will have zero impact on the " +#~ "typical user or operator." +#~ msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" #~ msgid "" -#~ "Go to the ``Build docker images`` " -#~ "workflow `page " -#~ "`_." +#~ "For small changes and additions, going" +#~ " through the Enhancement process would " +#~ "be time-consuming and unnecessary. This" +#~ " includes, for example, adding new " +#~ "Federated Learning algorithms, as these " +#~ "only add features without changing how" +#~ " Flower works or is used." 
#~ msgstr "" +#~ "对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变" +#~ " \"Flower \"的工作或使用方式。" #~ msgid "" -#~ "Click on the ``Run workflow`` button " -#~ "and type the new version of Flower" -#~ " in the ``Version of Flower`` input" -#~ " field." -#~ msgstr "" +#~ "Enhancements are different from feature " +#~ "requests, as they are already providing" +#~ " a laid-out path for implementation" +#~ " and are championed by members of " +#~ "the community." +#~ msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" -#~ msgid "Click on the **green** ``Run workflow`` button." -#~ msgstr "" +#~ msgid "" +#~ "An Enhancement is captured in a " +#~ "Markdown file that follows a defined " +#~ "template and a workflow to review " +#~ "and store enhancement docs for reference" +#~ " — the Enhancement Doc." +#~ msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" -#~ msgid "**Via the GitHub CI**" -#~ msgstr "" +#~ msgid "Enhancement Doc Template" +#~ msgstr "增强文档模板" #~ msgid "" -#~ "Make sure you are logged in via" -#~ " ``gh auth login`` and that the " -#~ "current working directory is the root" -#~ " of the Flower repository." -#~ msgstr "" +#~ "Each enhancement doc is provided as " +#~ "a Markdown file having the following " +#~ "structure" +#~ msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" + +#~ msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#~ msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" + +#~ msgid "Title (same as in metadata)" +#~ msgstr "标题(与描述数据中的标题相同)" + +#~ msgid "Table of Contents (if needed)" +#~ msgstr "目录(如有需要)" + +#~ msgid "Notes/Constraints/Caveats (optional)" +#~ msgstr "注意事项/限制/警告(可选)" + +#~ msgid "Design Details (optional)" +#~ msgstr "设计细节(可选)" + +#~ msgid "Graduation Criteria" +#~ msgstr "毕业标准" + +#~ msgid "Upgrade/Downgrade Strategy (if applicable)" +#~ msgstr "升级/降级策略(如适用)" + +#~ msgid "As a reference, this document follows the above structure." 
+#~ msgstr "作为参考,本文件采用上述结构。" #~ msgid "" -#~ "Trigger the workflow via ``gh workflow" -#~ " run docker-images.yml -f flwr-" -#~ "version=``." -#~ msgstr "" +#~ "**fed-number** (Required) The `fed-" +#~ "number` of the last Flower Enhancement" +#~ " Doc + 1. With this number, it" +#~ " becomes easy to reference other " +#~ "proposals." +#~ msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" -#~ msgid "Example: JAX - Run JAX Federated" -#~ msgstr "示例: JAX - 运行联邦式 JAX" +#~ msgid "**title** (Required) The title of the proposal in plain language." +#~ msgstr "**标题** (必填)用简明语言写出提案的标题。" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__. Supported " -#~ "architectures include ``amd64`` and " -#~ "``arm64v8``." -#~ msgstr "" -#~ "开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 " -#~ "`Docker Hub `_" -#~ " 上找到这些镜像。" +#~ "**status** (Required) The current status " +#~ "of the proposal. See [workflow](#workflow) " +#~ "for the possible states." +#~ msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" #~ msgid "" -#~ "If you do not see the version " -#~ "of Docker but instead get an error" -#~ " saying that the command was not " -#~ "found, you will need to install " -#~ "Docker first. You can find installation" -#~ " instruction `here `_." -#~ msgstr "" -#~ "如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在" -#~ " `_ 找到安装说明。" +#~ "**authors** (Required) A list of authors" +#~ " of the proposal. This is simply " +#~ "the GitHub ID." +#~ msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" #~ msgid "" -#~ "On Linux, Docker commands require " -#~ "``sudo`` privilege. If you want to " -#~ "avoid using ``sudo``, you can follow " -#~ "the `Post-installation steps " -#~ "`_" -#~ " on the official Docker website." 
-#~ msgstr "" -#~ "在 Linux 上,Docker 命令需要 ``sudo`` " -#~ "权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 `安装后步骤" -#~ " `_进行操作。" +#~ "**creation-date** (Required) The date " +#~ "that the proposal was first submitted" +#~ " in a PR." +#~ msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" #~ msgid "" -#~ "To ensure optimal performance and " -#~ "compatibility, the SuperLink, SuperNode and" -#~ " ServerApp image must have the same" -#~ " version when running together. This " -#~ "guarantees seamless integration and avoids " -#~ "potential conflicts or issues that may" -#~ " arise from using different versions." -#~ msgstr "" -#~ "为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " -#~ "映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" +#~ "**last-updated** (Optional) The date " +#~ "that the proposal was last changed " +#~ "significantly." +#~ msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" -#~ msgid "Flower SuperLink" -#~ msgstr "flower-superlink" +#~ msgid "" +#~ "**see-also** (Optional) A list of " +#~ "other proposals that are relevant to " +#~ "this one." +#~ msgstr "**另见** (可选)与本提案相关的其他提案清单。" -#~ msgid "Quickstart" -#~ msgstr "快速入门 JAX" +#~ msgid "**replaces** (Optional) A list of proposals that this one replaces." +#~ msgstr "**取代**(可选) 这份提案所取代的提案列表。" -#~ msgid "If you're looking to try out Flower, you can use the following command:" -#~ msgstr "如果您想试用 Flower,可以使用以下命令:" +#~ msgid "" +#~ "**superseded-by** (Optional) A list of" +#~ " proposals that this one supersedes." +#~ msgstr "**被取代者** (可选) 此提案取代的提案列表。" + +#~ msgid "Workflow" +#~ msgstr "工作流程" #~ msgid "" -#~ "The command pulls the Docker image " -#~ "with the tag ``1.8.0`` from Docker " -#~ "Hub. The tag specifies the Flower " -#~ "version. In this case, Flower 1.8.0. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." 
-#~ msgstr "" -#~ "该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的" -#~ " Docker 镜像。标签包含使用 Flower、Python 和 Ubuntu" -#~ " 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和" -#~ " Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" +#~ "The idea forming the enhancement should" +#~ " already have been discussed or " +#~ "pitched in the community. As such, " +#~ "it needs a champion, usually the " +#~ "author, who shepherds the enhancement. " +#~ "This person also has to find " +#~ "committers to Flower willing to review" +#~ " the proposal." +#~ msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower SuperLink. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "New enhancements are checked in with " +#~ "a file name in the form of " +#~ "`NNNN-YYYYMMDD-enhancement-title.md`, with " +#~ "`NNNN` being the Flower Enhancement Doc" +#~ " number, to `enhancements`. All " +#~ "enhancements start in `provisional` state " +#~ "as part of a pull request. " +#~ "Discussions are done as part of " +#~ "the pull request review." 
#~ msgstr "" -#~ "``-p :`` 标记会告诉 Docker 将主机的端口" -#~ " ``9091``/``9092`` 映射到容器的端口 ``9091``/`9092``,这样你就可以在" -#~ " ``http://localhost:9091`` 上访问 Driver API,在 " -#~ "``http://localhost:9092`` 上访问 Fleet " -#~ "API。最后,标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " -#~ "``--insecure`` 。" +#~ "新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md`" +#~ " 的文件名签入,其中 `NNNN` 是花朵增强文档的编号,并将其转入 " +#~ "`enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " +#~ "`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `__ when " -#~ "deploying to a production environment." +#~ "Once an enhancement has been reviewed" +#~ " and approved, its status is changed" +#~ " to `implementable`. The actual " +#~ "implementation is then done in separate" +#~ " pull requests. These pull requests " +#~ "should mention the respective enhancement " +#~ "as part of their description. After " +#~ "the implementation is done, the proposal" +#~ " status is changed to `implemented`." #~ msgstr "" -#~ "不安全 \"标志启用不安全通信(使用 HTTP,而非 " -#~ "HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " -#~ "`_。" +#~ "一旦增强功能通过审核和批准,其状态就会变为 " +#~ "`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 " +#~ "`已实施`。" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the SuperLink " -#~ "supports:" -#~ msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" +#~ "Under certain conditions, other states " +#~ "are possible. An Enhancement has the " +#~ "following states:" +#~ msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" -#~ msgid "Mounting a volume to store the state on the host system" -#~ msgstr "在主机系统上挂载卷以存储状态" +#~ msgid "" +#~ "`provisional`: The enhancement has been " +#~ "proposed and is actively being defined." +#~ " This is the starting state while " +#~ "the proposal is being fleshed out " +#~ "and actively defined and discussed." 
+#~ msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" + +#~ msgid "`implementable`: The enhancement has been reviewed and approved." +#~ msgstr "`可实施`: 增强功能已审核通过。" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a directory where you want " -#~ "to save the file on your host " -#~ "system and a name for the database" -#~ " file. By default, the SuperLink " -#~ "container runs with a non-root " -#~ "user called ``app`` with the user " -#~ "ID ``49999``. It is recommended to " -#~ "create new directory and change the " -#~ "user ID of the directory to " -#~ "``49999`` to ensure the mounted " -#~ "directory has the proper permissions. If" -#~ " you later want to delete the " -#~ "directory, you can change the user " -#~ "ID back to the current user ID " -#~ "by running ``sudo chown -R $USER:$(id" -#~ " -gn) state``." -#~ msgstr "" +#~ "`implemented`: The enhancement has been " +#~ "implemented and is no longer actively" +#~ " changed." +#~ msgstr "`已实施`: 增强功能已实施,不再主动更改。" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/certificates/`` directory of the " -#~ "container. This allows the SuperLink to" -#~ " access the files within the " -#~ "container. The ``ro`` stands for " -#~ "``read-only``. Docker volumes default to" -#~ " ``read-write``; that option tells " -#~ "Docker to make the volume ``read-" -#~ "only`` instead. Finally, we pass the " -#~ "names of the certificates and key " -#~ "file to the SuperLink with the " -#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " -#~ "and ``--ssl-keyfile`` flag." 
-#~ msgstr "" -#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " -#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " -#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " -#~ "标志将证书名称传递给服务器。" +#~ "`deferred`: The enhancement is proposed " +#~ "but not actively being worked on." +#~ msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" #~ msgid "" -#~ "Because Flower containers, by default, " -#~ "run with a non-root user ``app``," -#~ " the mounted files and directories " -#~ "must have the proper permissions for " -#~ "the user ID ``49999``. For example, " -#~ "to change the user ID of all " -#~ "files in the ``certificates/`` directory, " -#~ "you can run ``sudo chown -R " -#~ "49999:49999 certificates/*``." +#~ "`rejected`: The authors and reviewers " +#~ "have decided that this enhancement is" +#~ " not moving forward." +#~ msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" + +#~ msgid "`withdrawn`: The authors have withdrawn the enhancement." +#~ msgstr "`撤回`: 作者已撤回增强功能。" + +#~ msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#~ msgstr "`已替换`: 增强功能已被新的增强功能取代。" + +#~ msgid "" +#~ "Adding an additional process to the " +#~ "ones already provided by GitHub (Issues" +#~ " and Pull Requests) adds more " +#~ "complexity and can be a barrier " +#~ "for potential first-time contributors." +#~ msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" + +#~ msgid "" +#~ "Expanding the proposal template beyond " +#~ "the single-sentence description currently " +#~ "required in the features issue template" +#~ " may be a heavy burden for " +#~ "non-native English speakers." +#~ msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" + +#~ msgid "GitHub Issues" +#~ msgstr "GitHub 问题" + +#~ msgid "" +#~ "Using GitHub Issues for these kinds " +#~ "of enhancements is doable. One could " +#~ "use, for example, tags, to differentiate" +#~ " and filter them from other issues." 
+#~ " The main issue is in discussing " +#~ "and reviewing an enhancement: GitHub " +#~ "issues only have a single thread " +#~ "for comments. Enhancements usually have " +#~ "multiple threads of discussion at the" +#~ " same time for various parts of " +#~ "the doc. Managing these multiple " +#~ "discussions can be confusing when using" +#~ " GitHub Issues." +#~ msgstr "" +#~ "使用 GitHub Issues " +#~ "进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: GitHub " +#~ "问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " +#~ "问题时,管理这些多重讨论会很混乱。" + +#~ msgid "Google Docs" +#~ msgstr "谷歌文档" + +#~ msgid "" +#~ "Google Docs allow for multiple threads" +#~ " of discussions. But as Google Docs" +#~ " are hosted outside the project, " +#~ "their discoverability by the community " +#~ "needs to be taken care of. A " +#~ "list of links to all proposals has" +#~ " to be managed and made available " +#~ "for the community. Compared to shipping" +#~ " proposals as part of Flower's " +#~ "repository, the potential for missing " +#~ "links is much higher." +#~ msgstr "" +#~ "谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" +#~ " Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" + +#~ msgid "FED - Flower Enhancement Doc" +#~ msgstr "FED - Flower 增强文件" + +#~ msgid "" +#~ "Along with model parameters, Flower can" +#~ " send configuration values to clients. " +#~ "Configuration values can be used for " +#~ "various purposes. They are, for example," +#~ " a popular way to control client-" +#~ "side hyperparameters from the server." +#~ msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" + +#~ msgid "" +#~ "Configuration values are represented as " +#~ "a dictionary with ``str`` keys and " +#~ "values of type ``bool``, ``bytes``, " +#~ "``double`` (64-bit precision float), ``int``," +#~ " or ``str`` (or equivalent types in" +#~ " different languages). 
Here is an " +#~ "example of a configuration dictionary in" +#~ " Python:" +#~ msgstr "" +#~ "配置值以字典的形式表示,字典的键为 ``str``,值的类型为 " +#~ "``bool``、``bytes``、``double``(64 位精度浮点型)、``int``或 " +#~ "``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" + +#~ msgid "" +#~ "One can, for example, convert a " +#~ "list of floating-point numbers to " +#~ "a JSON string, then send the JSON" +#~ " string using the configuration dictionary," +#~ " and then convert the JSON string " +#~ "back to a list of floating-point" +#~ " numbers on the client." +#~ msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" + +#~ msgid "" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like ``FedAvg``. " +#~ "Built-in strategies support so-called " +#~ "configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." +#~ msgstr "" +#~ "向客户端发送配置值的最简单方法是使用内置策略,如 " +#~ ":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" + +#~ msgid "" +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter ``on_fit_config_fn``:" +#~ msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" + +#~ msgid "" +#~ "One the client side, we receive " +#~ "the configuration dictionary in ``fit``:" +#~ msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" + +#~ msgid "" +#~ "There is also an `on_evaluate_config_fn` " +#~ "to configure evaluation, which works the" +#~ " same way. They are separate " +#~ "functions because one might want to " +#~ "send different configuration values to " +#~ "`evaluate` (for example, to use a " +#~ "different batch size)." 
+#~ msgstr "" +#~ "还有一个 `on_evaluate_config_fn` " +#~ "用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " +#~ "发送不同的配置值(例如,使用不同的批量大小)。" + +#~ msgid "" +#~ "The built-in strategies call this " +#~ "function every round (that is, every " +#~ "time `Strategy.configure_fit` or " +#~ "`Strategy.configure_evaluate` runs). Calling " +#~ "`on_evaluate_config_fn` every round allows us" +#~ " to vary/change the config dict over" +#~ " consecutive rounds. If we wanted to" +#~ " implement a hyperparameter schedule, for" +#~ " example, to increase the number of" +#~ " local epochs during later rounds, we" +#~ " could do the following:" +#~ msgstr "" +#~ "内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " +#~ "`Strategy.configure_evaluate` 时)。每轮调用 " +#~ "`on_evaluate_config_fn` " +#~ "允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" + +#~ msgid "The ``FedAvg`` strategy will call this function *every round*." +#~ msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" + +#~ msgid "Configuring individual clients" +#~ msgstr "配置个别客户端" + +#~ msgid "" +#~ "In some cases, it is necessary to" +#~ " send different configuration values to " +#~ "different clients." +#~ msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. 
" +#~ "Here's a nonsensical example that " +#~ "customizes ``FedAvg`` by adding a custom" +#~ " ``\"hello\": \"world\"`` configuration key/value" +#~ " pair to the config dict of a" +#~ " *single client* (only the first " +#~ "client in the list, the other " +#~ "clients in this round to not " +#~ "receive this \"special\" config value):" +#~ msgstr "" +#~ "这可以通过定制现有策略或 `从头开始实施一个定制策略 " +#~ "`_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " +#~ "dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " +#~ "中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" + +#~ msgid "Configure logging" +#~ msgstr "配置日志记录" + +#~ msgid "" +#~ "The Flower logger keeps track of " +#~ "all core events that take place in" +#~ " federated learning workloads. It presents" +#~ " information by default following a " +#~ "standard message format:" +#~ msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" + +#~ msgid "" +#~ "containing relevant information including: log" +#~ " message level (e.g. ``INFO``, ``DEBUG``)," +#~ " a timestamp, the line where the " +#~ "logging took place from, as well " +#~ "as the log message itself. In this" +#~ " way, the logger would typically " +#~ "display information on your terminal as" +#~ " follows:" #~ msgstr "" +#~ "相关信息包括:日志信息级别(例如 " +#~ ":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" + +#~ msgid "Saving log to file" +#~ msgstr "将日志保存到文件" + +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ "``fl.server.start_server``) and when using the" +#~ " ``VirtualClientEngine`` (i.e. when you do" +#~ " ``fl.simulation.start_simulation``). In some " +#~ "situations you might want to save " +#~ "this log to disk. You can do " +#~ "so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. 
For example:" +#~ msgstr "" +#~ "默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC " +#~ "的联邦学习(即执行 :code:`fl.server.start_server` 时),也适用于使用 " +#~ ":code:`VirtualClientEngine` 时(即执行 " +#~ ":code:`fl.simulation.start_simulation` " +#~ "时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure()" +#~ " " +#~ "`_" +#~ " 函数。例如:" + +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to ``log.txt``. This file will be" +#~ " created in the same directory as " +#~ "were you are running the code " +#~ "from. If we inspect we see the " +#~ "log above is also recorded but " +#~ "prefixing with ``identifier`` each line:" +#~ msgstr "" +#~ "通过上述操作,Flower 会将您在终端上看到的日志记录到 " +#~ ":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " +#~ ":code:`identifier` 作为前缀:" + +#~ msgid "Log your own messages" +#~ msgstr "记录自己的信息" + +#~ msgid "" +#~ "You might expand the information shown" +#~ " by default with the Flower logger" +#~ " by adding more messages relevant to" +#~ " your application. You can achieve " +#~ "this easily as follows." +#~ msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" + +#~ msgid "" +#~ "In this way your logger will show," +#~ " in addition to the default messages," +#~ " the ones introduced by the clients" +#~ " as specified above." +#~ msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" + +#~ msgid "Log to a remote service" +#~ msgstr "登录远程服务" #~ msgid "" -#~ "The SuperNode Docker image comes with" -#~ " a pre-installed version of Flower" -#~ " and serves as a base for " -#~ "building your own SuperNode image." -#~ msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" +#~ "The ``fl.common.logger.configure`` function, also" +#~ " allows specifying a host to which" +#~ " logs can be pushed (via ``POST``)" +#~ " through a native Python " +#~ "``logging.handler.HTTPHandler``. 
This is a " +#~ "particularly useful feature in ``gRPC``-based" +#~ " Federated Learning workloads where " +#~ "otherwise gathering logs from all " +#~ "entities (i.e. the server and the " +#~ "clients) might be cumbersome. Note that" +#~ " in Flower simulation, the server " +#~ "automatically displays all logs. You can" +#~ " still specify a ``HTTPHandler`` should " +#~ "you wish to backup or analyze the" +#~ " logs somewhere else." +#~ msgstr "" +#~ "此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " +#~ ":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 " +#~ ":code:`POST`)。在基于 :code:`gRPC` " +#~ "的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 Flower" +#~ " 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" + +#~ msgid "Monitor simulation" +#~ msgstr "监控模拟" #~ msgid "" -#~ "We will use the ``quickstart-pytorch``" -#~ " example, which you can find in " -#~ "the Flower repository, to illustrate how" -#~ " you can dockerize your ClientApp." +#~ "Flower allows you to monitor system " +#~ "resources while running your simulation. " +#~ "Moreover, the Flower simulation engine " +#~ "is powerful and enables you to " +#~ "decide how to allocate resources per " +#~ "client manner and constrain the total" +#~ " usage. Insights from resource consumption" +#~ " can help you make smarter decisions" +#~ " and speed up the execution time." #~ msgstr "" -#~ "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 " -#~ "ClientApp 进行 docker 化。" +#~ "Flower 允许您在运行模拟时监控系统资源。此外,Flower " +#~ "仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" #~ msgid "" -#~ "Before we can start, we need to" -#~ " meet a few prerequisites in our " -#~ "local development environment. You can " -#~ "skip the first part if you want" -#~ " to run your ClientApp instead of " -#~ "the ``quickstart-pytorch`` example." 
-#~ msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" +#~ "The specific instructions assume you are" +#~ " using macOS and have the `Homebrew" +#~ " `_ package manager installed." +#~ msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" -#~ msgid "Let's assume the following project layout:" -#~ msgstr "假设项目布局如下" +#~ msgid "Downloads" +#~ msgstr "下载" #~ msgid "" -#~ "First, we need to create a " -#~ "``requirements.txt`` file in the directory " -#~ "where the ``ClientApp`` code is located." -#~ " In the file, we list all the" -#~ " dependencies that the ClientApp requires." +#~ "`Prometheus `_ is used " +#~ "for data collection, while `Grafana " +#~ "`_ will enable you to" +#~ " visualize the collected data. They " +#~ "are both well integrated with `Ray " +#~ "`_ which Flower uses " +#~ "under the hood." #~ msgstr "" -#~ "首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 " -#~ "``requirements.txt`` 文件。在该文件中,我们列出了 ClientApp " -#~ "需要的所有依赖项。" +#~ "`Prometheus `_ 用于收集数据,而 " +#~ "`Grafana `_ 则能让你将收集到的数据可视化。它们都与 " +#~ "Flower 在引擎下使用的 `Ray `_ " +#~ "紧密集成。" #~ msgid "" -#~ "Note that `flwr `__" -#~ " is already installed in the " -#~ "``flwr/supernode`` base image, so you " -#~ "only need to include other package " -#~ "dependencies in your ``requirements.txt``, " -#~ "such as ``torch``, ``tensorflow``, etc." +#~ "Overwrite the configuration files (depending" +#~ " on your device, it might be " +#~ "installed on a different path)." +#~ msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" + +#~ msgid "If you are on an M1 Mac, it should be:" +#~ msgstr "如果你使用的是 M1 Mac,应该是这样:" + +#~ msgid "On the previous generation Intel Mac devices, it should be:" +#~ msgstr "在上一代英特尔 Mac 设备上,应该是这样:" + +#~ msgid "" +#~ "Open the respective configuration files " +#~ "and change them. Depending on your " +#~ "device, use one of the two " +#~ "following commands:" +#~ msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" + +#~ msgid "" +#~ "and then delete all the text in" +#~ " the file and paste a new " +#~ "Prometheus config you see below. 
You " +#~ "may adjust the time intervals to " +#~ "your requirements:" +#~ msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" + +#~ msgid "" +#~ "Now after you have edited the " +#~ "Prometheus configuration, do the same " +#~ "with the Grafana configuration files. " +#~ "Open those using one of the " +#~ "following commands as before:" +#~ msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" + +#~ msgid "" +#~ "Your terminal editor should open and " +#~ "allow you to apply the following " +#~ "configuration as before." +#~ msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" + +#~ msgid "" +#~ "Congratulations, you just downloaded all " +#~ "the necessary software needed for " +#~ "metrics tracking. Now, let’s start it." +#~ msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" + +#~ msgid "Tracking metrics" +#~ msgstr "跟踪指标" + +#~ msgid "" +#~ "Before running your Flower simulation, " +#~ "you have to start the monitoring " +#~ "tools you have just installed and " +#~ "configured." +#~ msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" + +#~ msgid "" +#~ "Please include the following argument in" +#~ " your Python code when starting a " +#~ "simulation." +#~ msgstr "开始模拟时,请在 Python 代码中加入以下参数。" + +#~ msgid "Now, you are ready to start your workload." +#~ msgstr "现在,您可以开始工作了。" + +#~ msgid "" +#~ "Shortly after the simulation starts, you" +#~ " should see the following logs in " +#~ "your terminal:" +#~ msgstr "模拟启动后不久,您就会在终端中看到以下日志:" + +#~ msgid "You can look at everything at http://127.0.0.1:8265 ." +#~ msgstr "您可以在 ``_ 查看所有内容。" + +#~ msgid "" +#~ "It's a Ray Dashboard. You can " +#~ "navigate to Metrics (on the left " +#~ "panel, the lowest option)." +#~ msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" + +#~ msgid "" +#~ "Or alternatively, you can just see " +#~ "them in Grafana by clicking on the" +#~ " right-up corner, “View in Grafana”." +#~ " Please note that the Ray dashboard" +#~ " is only accessible during the " +#~ "simulation. 
After the simulation ends, " +#~ "you can only use Grafana to " +#~ "explore the metrics. You can start " +#~ "Grafana by going to " +#~ "``http://localhost:3000/``." #~ msgstr "" -#~ "请注意,`flwr `__ " -#~ "已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" +#~ "或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana " +#~ "中查看它们。请注意,Ray 仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana " +#~ "浏览指标。您可以访问 ``http://localhost:3000/``启动 Grafana。" #~ msgid "" -#~ "Next, we create a Dockerfile. If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.supernode`` in ``examples/quickstart-" -#~ "pytorch``." +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port ``3000`` on your" +#~ " machine as long as they are " +#~ "running." #~ msgstr "" -#~ "接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch``" -#~ " 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为" -#~ " ``Dockerfile.supernode`` 的新文件。" +#~ "完成可视化后,请停止 Prometheus 和 " +#~ "Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" + +#~ msgid "Resource allocation" +#~ msgstr "资源分配" #~ msgid "" -#~ "The ``Dockerfile.supernode`` contains the " -#~ "instructions that assemble the SuperNode " -#~ "image." -#~ msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" +#~ "You must understand how the Ray " +#~ "library works to efficiently allocate " +#~ "system resources to simulation clients " +#~ "on your own." +#~ msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the SuperNode " -#~ "image tagged ``nightly`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. Next, we install the" -#~ " ClientApp dependencies by copying the " -#~ "``requirements.txt`` file into the image " -#~ "and run ``pip install``. 
In the " -#~ "last two lines, we copy the " -#~ "``client.py`` module into the image and" -#~ " set the entry point to ``flower-" -#~ "client-app`` with the argument " -#~ "``client:app``. The argument is the " -#~ "object reference of the ClientApp " -#~ "(``:``) that will be run" -#~ " inside the ClientApp." +#~ "Initially, the simulation (which Ray " +#~ "handles under the hood) starts by " +#~ "default with all the available resources" +#~ " on the system, which it shares " +#~ "among the clients. It doesn't mean " +#~ "it divides it equally among all of" +#~ " them, nor that the model training" +#~ " happens at all of them " +#~ "simultaneously. You will learn more " +#~ "about that in the later part of" +#~ " this blog. You can check the " +#~ "system resources by running the " +#~ "following:" #~ msgstr "" -#~ "在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 " -#~ "SuperNode 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " -#~ "``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " -#~ "文件复制到映像中并运行 ``pip install`` 来安装 ClientApp " -#~ "依赖项。最后两行,我们将 ``client.py`` 模块复制到映像中,并将入口点设置为 " -#~ "``flower-client-app``,参数为 ``client:app``。参数是将在 " -#~ "ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" +#~ "最初,模拟(由 Ray " +#~ "在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" -#~ msgid "Building the SuperNode Docker image" -#~ msgstr "启动服务器" +#~ msgid "In Google Colab, the result you see might be similar to this:" +#~ msgstr "在 Google Colab 中,您看到的结果可能与此类似:" #~ msgid "" -#~ "We gave the image the name " -#~ "``flwr_supernode``, and the tag ``0.0.1``. " -#~ "Remember that the here chosen values " -#~ "only serve as an example. You can" -#~ " change them to your needs." +#~ "However, you can overwrite the defaults." +#~ " When starting a simulation, do the" +#~ " following (you don't need to " +#~ "overwrite all of them):" +#~ msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" + +#~ msgid "Let’s also specify the resource for a single client." 
+#~ msgstr "我们还可以为单个客户指定资源。" + +#~ msgid "" +#~ "Now comes the crucial part. Ray " +#~ "will start a new client only when" +#~ " it has all the required resources" +#~ " (such that they run in parallel) " +#~ "when the resources allow." +#~ msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" + +#~ msgid "" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ "``client_num_gpus = 0.5`` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ "``client_num_gpus = 2``, the simulation " +#~ "wouldn't start (even if you had 2" +#~ " GPUs but decided to set 1 in" +#~ " ``ray_init_args``)." #~ msgstr "" -#~ "我们将图像命名为 ``flwr_supernode``,标签为 " -#~ "``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" +#~ "在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus " +#~ "= 0.5` 将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 " +#~ ":code:`client_num_gpus = 2`,模拟将无法启动(即使您有 2 个" +#~ " GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" -#~ msgid "Running the SuperNode Docker image" -#~ msgstr "启动服务器" +#~ msgid "Q: I don't see any metrics logged." +#~ msgstr "问:我没有看到任何指标记录。" -#~ msgid "Now that we have built the SuperNode image, we can finally run it." -#~ msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" +#~ msgid "" +#~ "A: The timeframe might not be " +#~ "properly set. The setting is in " +#~ "the top right corner (\"Last 30 " +#~ "minutes\" by default). Please change the" +#~ " timeframe to reflect the period when" +#~ " the simulation was running." +#~ msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" -#~ msgid "Let's break down each part of this command:" -#~ msgstr "让我们来分析一下这条命令的各个部分:" +#~ msgid "" +#~ "Q: I see “Grafana server not " +#~ "detected. 
Please make sure the Grafana" +#~ " server is running and refresh this" +#~ " page” after going to the Metrics " +#~ "tab in Ray Dashboard." +#~ msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" #~ msgid "" -#~ "``--rm``: This option specifies that the" -#~ " container should be automatically removed" -#~ " when it stops." -#~ msgstr "`-rm``: 该选项指定容器停止时应自动移除。" +#~ "A: You probably don't have Grafana " +#~ "running. Please check the running " +#~ "services" +#~ msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" -#~ msgid "``--insecure``: This option enables insecure communication." -#~ msgstr "不安全\": 该选项启用不安全通信。" +#~ msgid "" +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to http://127.0.0.1:8265." +#~ msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" #~ msgid "" -#~ "``--superlink 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" -#~ msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" +#~ "A: Either the simulation has already " +#~ "finished, or you still need to " +#~ "start Prometheus." +#~ msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" -#~ msgid "API to connect to. Remember to update it with your SuperLink IP." -#~ msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" +#~ msgid "Resources" +#~ msgstr "资源" #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your SuperNodes." +#~ "Ray Dashboard: https://docs.ray.io/en/latest/ray-" +#~ "observability/getting-started.html" +#~ msgstr "Ray 仪表盘: ``_" + +#~ msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" #~ msgstr "" -#~ "要测试在本地运行 Flower,可以创建一个 \"桥接网络 " -#~ "`__\",使用\"--网络 \"参数并传递 Docker " -#~ "网络的名称,以运行超级节点。" +#~ "Ray 指标: ``_" #~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "SuperNode binary. 
To see all available" -#~ " flags that the SuperNode supports, " -#~ "run:" -#~ msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" +#~ "The ``VirtualClientEngine`` schedules, launches " +#~ "and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the ``VirtualClientEngine`` are:" +#~ msgstr "" +#~ ":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" +#~ " `_启动的客户端),因为它们可以通过创建一个继承自 `flwr.client.NumPyClient " +#~ "`_ " +#~ "的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" + +#~ msgid "" +#~ "Running Flower simulations still require " +#~ "you to define your client class, a" +#~ " strategy, and utility functions to " +#~ "download and load (and potentially " +#~ "partition) your dataset. With that out" +#~ " of the way, launching your " +#~ "simulation is done with `start_simulation " +#~ "`_ " +#~ "and a minimal example looks as " +#~ "follows:" +#~ msgstr "" +#~ "运行 Flower " +#~ "模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " +#~ "\"start_simulation `_\" " +#~ "来启动模拟了,一个最简单的示例如下:" + +#~ msgid "" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the ``ray_init_args`` input argument " +#~ "to ``start_simulation`` which the VCE " +#~ "internally passes to Ray's ``ray.init`` " +#~ "command. 
For a complete list of " +#~ "settings you can configure check the " +#~ "`ray.init `_ documentation. " +#~ "Do not set ``ray_init_args`` if you " +#~ "want the VCE to use all your " +#~ "system's CPUs and GPUs." +#~ msgstr "" +#~ "默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动" +#~ " Ray 时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 " +#~ ":code:`ray_init_args` 输入到 :code:`start_simulation` " +#~ "的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 " +#~ ":code:`ray.init` 命令。有关您可以配置的设置的完整列表,请查看 `ray.init " +#~ "`_ 文档。如果希望 VCE 使用系统中所有的 CPU " +#~ "和 GPU,请不要设置 :code:`ray_init_args`。" + +#~ msgid "" +#~ "By default the ``VirtualClientEngine`` assigns" +#~ " a single CPU core (and nothing " +#~ "else) to each virtual client. This " +#~ "means that if your system has 10" +#~ " cores, that many virtual clients can" +#~ " be concurrently running." +#~ msgstr "" +#~ "默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " +#~ "内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" + +#~ msgid "``num_cpus`` indicates the number of CPU cores a client would get." +#~ msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" + +#~ msgid "" +#~ "``num_gpus`` indicates the **ratio** of " +#~ "GPU memory a client gets assigned." +#~ msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" + +#~ msgid "Let's see a few examples:" +#~ msgstr "让我们来看几个例子:" + +#~ msgid "" +#~ "To understand all the intricate details" +#~ " on how resources are used to " +#~ "schedule FL clients and how to " +#~ "define custom resources, please take a" +#~ " look at the `Ray documentation " +#~ "`_." +#~ msgstr "" +#~ "要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray " +#~ "文档 `_。" + +#~ msgid "" +#~ "A few ready-to-run complete " +#~ "examples for Flower simulation in " +#~ "Tensorflow/Keras and PyTorch are provided " +#~ "in the `Flower repository " +#~ "`_. 
You can run " +#~ "them on Google Colab too:" +#~ msgstr "" +#~ "在 Tensorflow/Keras 和 PyTorch 中进行 Flower" +#~ " 模拟的几个可随时运行的完整示例已在 `Flower 库 " +#~ "`_ 中提供。您也可以在 Google " +#~ "Colab 上运行它们:" + +#~ msgid "" +#~ "`Tensorflow/Keras Simulation " +#~ "`_: 100 clients collaboratively " +#~ "train a MLP model on MNIST." +#~ msgstr "" +#~ "Tensorflow/Keras模拟 " +#~ "`_:100个客户端在MNIST上协作训练一个MLP模型。" + +#~ msgid "" +#~ "`PyTorch Simulation " +#~ "`_: 100 clients collaboratively train" +#~ " a CNN model on MNIST." +#~ msgstr "" +#~ "PyTorch 模拟 " +#~ "`_:100 个客户端在 MNIST 上协作训练一个 CNN " +#~ "模型。" + +#~ msgid "" +#~ "Have a copy of your dataset in " +#~ "all nodes (more about this in " +#~ ":ref:`simulation considerations `)" +#~ msgstr "" +#~ "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" + +#~ msgid "" +#~ "Pass ``ray_init_args={\"address\"=\"auto\"}`` to " +#~ "`start_simulation `_ so the " +#~ "``VirtualClientEngine`` attaches to a running" +#~ " Ray instance." +#~ msgstr "" +#~ "将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 " +#~ "`start_simulation `_ ,这样 " +#~ ":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" + +#~ msgid "Multi-node simulation good-to-know" +#~ msgstr "了解多节点模拟" #~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your SuperNode container." -#~ msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" +#~ "Here we list a few interesting " +#~ "functionality when running multi-node FL" +#~ " simulations:" +#~ msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" #~ msgid "" -#~ "Similar to the SuperNode image, the " -#~ "ServerApp Docker image comes with a " -#~ "pre-installed version of Flower and " -#~ "serves as a base for building your" -#~ " own ServerApp image." +#~ "User ``ray status`` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the ``VirtualClientEngine``." 
#~ msgstr "" -#~ "与 SuperNode 映像类似,ServerApp Docker 映像也预装了 " -#~ "Flower 版本,可作为构建自己的 ServerApp 映像的基础。" +#~ "使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 " +#~ ":code:`VirtualClientEngine` 可用的总资源。" + +#~ msgid "Considerations for simulations" +#~ msgstr "模拟的注意事项" #~ msgid "" -#~ "We will use the same ``quickstart-" -#~ "pytorch`` example as we do in the" -#~ " Flower SuperNode section. If you " -#~ "have not already done so, please " -#~ "follow the `SuperNode Prerequisites`_ before" -#~ " proceeding." +#~ "We are actively working on these " +#~ "fronts so to make it trivial to" +#~ " run any FL workload with Flower " +#~ "simulation." +#~ msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" + +#~ msgid "" +#~ "The current VCE allows you to run" +#~ " Federated Learning workloads in simulation" +#~ " mode whether you are prototyping " +#~ "simple scenarios on your personal laptop" +#~ " or you want to train a complex" +#~ " FL pipeline across multiple high-" +#~ "performance GPU nodes. While we add " +#~ "more capabilities to the VCE, the " +#~ "points below highlight some of the " +#~ "considerations to keep in mind when " +#~ "designing your FL pipeline with Flower." +#~ " We also highlight a couple of " +#~ "current limitations in our implementation." #~ msgstr "" -#~ "我们将使用与 \"Flower SuperNode \"部分相同的 " -#~ "\"quickstart-pytorch \"示例。如果您还没有这样做,请在继续之前遵循 " -#~ "\"SuperNode 先决条件\"。" +#~ "当前的 VCE " +#~ "允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU " +#~ "节点上训练复杂的 FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 " +#~ "Flower 设计 FL 时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" + +#~ msgid "GPU resources" +#~ msgstr "GPU 资源" + +#~ msgid "" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key ``num_gpus`` in ``client_resources``." 
+#~ " This being said, Ray (used " +#~ "internally by the VCE) is by " +#~ "default:" +#~ msgstr "" +#~ "VCE 会为指定 :code:`client_resources` 中 " +#~ ":code:`num_gpus` 关键字的客户端分配 GPU 内存份额。也就是说,Ray(VCE " +#~ "内部使用)是默认的:" + +#~ msgid "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set ``num_gpus=0.5`` and " +#~ "you have two GPUs in your system" +#~ " with different (e.g. 32GB and 8GB)" +#~ " VRAM amounts, they both would run" +#~ " 2 clients concurrently." +#~ msgstr "" +#~ "不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 " +#~ ":code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 8GB)VRAM " +#~ "的 GPU,它们都将同时运行 2 个客户端。" + +#~ msgid "" +#~ "not aware of other unrelated (i.e. " +#~ "not created by the VCE) workloads " +#~ "are running on the GPU. Two " +#~ "takeaways from this are:" +#~ msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" + +#~ msgid "" +#~ "Your Flower server might need a " +#~ "GPU to evaluate the `global model` " +#~ "after aggregation (by instance when " +#~ "making use of the `evaluate method " +#~ "`_)" +#~ msgstr "" +#~ "您的 Flower 服务器可能需要 GPU 来评估聚合后的 " +#~ "\"全局模型\"(例如在使用 \"评估方法\"`_时)" + +#~ msgid "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ "``CUDA_VISIBLE_DEVICES=\"\"`` when launching " +#~ "your experiment." +#~ msgstr "" +#~ "如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" + +#~ msgid "" +#~ "In addition, the GPU resource limits " +#~ "passed to ``client_resources`` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." 
+#~ msgstr "" +#~ "此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是" +#~ " \"强制 \"的(即可以超出),这可能导致客户端使用的 VRAM 超过启动模拟时指定的比例。" + +#~ msgid "TensorFlow with GPUs" +#~ msgstr "使用 GPU 的 TensorFlow" + +#~ msgid "" +#~ "When `using a GPU with TensorFlow " +#~ "`_ nearly your " +#~ "entire GPU memory of all your GPUs" +#~ " visible to the process will be " +#~ "mapped. This is done by TensorFlow " +#~ "for optimization purposes. However, in " +#~ "settings such as FL simulations where" +#~ " we want to split the GPU into" +#~ " multiple `virtual` clients, this is " +#~ "not a desirable mechanism. Luckily we" +#~ " can disable this default behavior by" +#~ " `enabling memory growth " +#~ "`_." +#~ msgstr "" +#~ "在 TensorFlow `_ " +#~ "中使用 GPU 时,几乎所有进程可见的 GPU 内存都将被映射。TensorFlow " +#~ "这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 " +#~ "\"虚拟 \"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " +#~ "`_来禁用这一默认行为。" + +#~ msgid "" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of ``actor_kwargs`` we can pass" +#~ " the reserved key `\"on_actor_init_fn\"` in" +#~ " order to specify a function to " +#~ "be executed upon actor initialization. " +#~ "In this case, to enable GPU growth" +#~ " for TF workloads. It would look " +#~ "as follows:" +#~ msgstr "" +#~ "这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " +#~ ":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" +#~ " TF 工作负载的 GPU 增长,它看起来如下:" + +#~ msgid "" +#~ "This is precisely the mechanism used " +#~ "in `Tensorflow/Keras Simulation " +#~ "`_ example." +#~ msgstr "" +#~ "这正是 \"Tensorflow/Keras 模拟 " +#~ "`_\"示例中使用的机制。" -#~ msgid "Creating a ServerApp Dockerfile" -#~ msgstr "创建 ServerApp Dockerfile" +#~ msgid "Multi-node setups" +#~ msgstr "多节点设置" #~ msgid "" -#~ "First, we need to create a " -#~ "Dockerfile in the directory where the" -#~ " ``ServerApp`` code is located. 
If " -#~ "you use the ``quickstart-pytorch`` " -#~ "example, create a new file called " -#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" -#~ "pytorch``." +#~ "The VCE does not currently offer a" +#~ " way to control on which node a" +#~ " particular `virtual` client is executed." +#~ " In other words, if more than a" +#~ " single node have the resources " +#~ "needed by a client to run, then" +#~ " any of those nodes could get " +#~ "the client workload scheduled onto. " +#~ "Later in the FL process (i.e. in" +#~ " a different round) the same client" +#~ " could be executed by a different " +#~ "node. Depending on how your clients " +#~ "access their datasets, this might " +#~ "require either having a copy of " +#~ "all dataset partitions on all nodes " +#~ "or a dataset serving mechanism (e.g. " +#~ "using nfs, a database) to circumvent " +#~ "data duplication." #~ msgstr "" -#~ "首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用" -#~ " ``quickstart-pytorch`` 示例,请在 ``examples" -#~ "/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.serverapp``" -#~ " 的新文件。" +#~ "VCE 目前不提供控制特定 \"虚拟 " +#~ "\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在" +#~ " FL " +#~ "进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" +#~ " nfs 或数据库)来避免数据重复。" #~ msgid "" -#~ "The ``Dockerfile.serverapp`` contains the " -#~ "instructions that assemble the ServerApp " -#~ "image." -#~ msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" +#~ "By definition virtual clients are " +#~ "`stateless` due to their ephemeral " +#~ "nature. A client state can be " +#~ "implemented as part of the Flower " +#~ "client class but users need to " +#~ "ensure this saved to persistent storage" +#~ " (e.g. a database, disk) and that " +#~ "can be retrieve later by the same" +#~ " client regardless on which node it" +#~ " is running from. 
This is related " +#~ "to the point above also since, in" +#~ " some way, the client's dataset could" +#~ " be seen as a type of `state`." +#~ msgstr "" +#~ "根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " +#~ "客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" +#~ " \"状态\"。" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the ServerApp " -#~ "image tagged ``1.8.0`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. In the last two " -#~ "lines, we copy the ``server.py`` module" -#~ " into the image and set the " -#~ "entry point to ``flower-server-app`` " -#~ "with the argument ``server:app``. The " -#~ "argument is the object reference of " -#~ "the ServerApp (``:``) that " -#~ "will be run inside the ServerApp " -#~ "container." -#~ msgstr "" -#~ "在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 " -#~ "ServerApp 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " -#~ "``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " -#~ "模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 " -#~ "``server:app``。参数是将在 ServerApp 容器内运行的 ServerApp " -#~ "的对象引用(``<模块>:<属性>``)。" +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ "``start_server`` function. It is usually " +#~ "recommended to adjust a few parameters" +#~ " during instantiation:" +#~ msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" -#~ msgid "Building the ServerApp Docker image" -#~ msgstr "启动服务器" +#~ msgid "Legacy example guides" +#~ msgstr "旧版指南范例" -#~ msgid "Running the ServerApp Docker image" -#~ msgstr "启动服务器" +#~ msgid "flwr is the Flower command line interface." +#~ msgstr "注册 Flower ClientProxy 实例。" -#~ msgid "Now that we have built the ServerApp image, we can finally run it." 
-#~ msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" +#~ msgid "Options" +#~ msgstr "解决方案" + +#~ msgid "Install completion for the current shell." +#~ msgstr "当前运行的标识符。" #~ msgid "" -#~ "``--superlink 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" -#~ msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" +#~ "Show completion for the current shell," +#~ " to copy it or customize the " +#~ "installation." +#~ msgstr "" + +#~ msgid "Build a Flower App into a Flower App Bundle (FAB)." +#~ msgstr "" #~ msgid "" -#~ "To test running Flower locally, you " -#~ "can create a `bridge network " -#~ "`__, use the ``--network`` argument" -#~ " and pass the name of the " -#~ "Docker network to run your ServerApps." +#~ "You can run ``flwr build`` without " +#~ "any arguments to bundle the app " +#~ "located in the current directory. " +#~ "Alternatively, you can you can specify" +#~ " a path using the ``--app`` option" +#~ " to bundle an app located at " +#~ "the provided path. For example:" +#~ msgstr "" + +#~ msgid "``flwr build --app ./apps/flower-hello-world``." +#~ msgstr "" + +#~ msgid "Path of the Flower App to bundle into a FAB" +#~ msgstr "" + +#~ msgid "Install a Flower App Bundle." +#~ msgstr "安装Flower" + +#~ msgid "It can be ran with a single FAB file argument:" +#~ msgstr "" + +#~ msgid "``flwr install ./target_project.fab``" #~ msgstr "" -#~ "要测试在本地运行 Flower,可以创建一个 ``bridge network " -#~ "`___,使用 ``--network`` 参数并传递 Docker " -#~ "网络的名称,以运行 ServerApps。" -#~ msgid "" -#~ "Any argument that comes after the " -#~ "tag is passed to the Flower " -#~ "ServerApp binary. 
To see all available" -#~ " flags that the ServerApp supports, " -#~ "run:" -#~ msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" +#~ msgid "The target install directory can be specified with ``--flwr-dir``:" +#~ msgstr "" -#~ msgid "" -#~ "To enable SSL, we will need to " -#~ "mount a PEM-encoded root certificate " -#~ "into your ServerApp container." -#~ msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +#~ msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +#~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--root-certificates`` " -#~ "flags when starting the container." +#~ "This will install ``target_project`` to " +#~ "``./docs/flwr/``. By default, ``flwr-dir`` " +#~ "is equal to:" #~ msgstr "" -#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " -#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " -#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " -#~ "标志将证书名称传递给服务器。" -#~ msgid "Run with root user privileges" +#~ msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" #~ msgstr "" -#~ msgid "" -#~ "Flower Docker images, by default, run" -#~ " with a non-root user " -#~ "(username/groupname: ``app``, UID/GID: ``49999``)." -#~ " Using root user is not recommended" -#~ " unless it is necessary for specific" -#~ " tasks during the build process. " -#~ "Always make sure to run the " -#~ "container as a non-root user in" -#~ " production to maintain security best " -#~ "practices." +#~ msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" #~ msgstr "" -#~ msgid "**Run a container with root user privileges**" +#~ msgid "``$HOME/.flwr/`` in all other cases" #~ msgstr "" -#~ msgid "**Run the build process with root user privileges**" +#~ msgid "The desired install path." 
#~ msgstr "" -#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" -#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgid "Arguments" +#~ msgstr "参数解析器" -#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" -#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgid "Optional argument" +#~ msgstr "可选的改进措施" -#~ msgid "d defaults to None." -#~ msgstr "d 默认为 \"无\"。" +#~ msgid "The source FAB file to install." +#~ msgstr "" -#~ msgid "Update R from dict/iterable E and F." -#~ msgstr "根据二进制/可迭代 E 和 F 更新 R。" +#~ msgid "Get logs from a Flower project run." +#~ msgstr "" -#~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_ENTER " -#~ "`\\" +#~ msgid "Flag to stream or print logs from the Flower run" #~ msgstr "" -#~ ":py:obj:`RUN_DRIVER_API_ENTER " -#~ "`\\" -#~ msgid "" -#~ ":py:obj:`RUN_DRIVER_API_LEAVE " -#~ "`\\" +#~ msgid "default" +#~ msgstr "工作流程" + +#~ msgid "``True``" #~ msgstr "" -#~ ":py:obj:`RUN_DRIVER_API_LEAVE " -#~ "`\\" -#~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_ENTER " -#~ "`\\" +#~ msgid "Required argument" +#~ msgstr "构建文档" + +#~ msgid "The Flower run ID to query" +#~ msgstr "加入 Flower 社区" + +#~ msgid "Path of the Flower project to run" +#~ msgstr "" + +#~ msgid "Name of the federation to run the app on" +#~ msgstr "" + +#~ msgid "Create new Flower App." +#~ msgstr "Flower 服务器。" + +#~ msgid "The ML framework to use" #~ msgstr "" -#~ ":py:obj:`RUN_FLEET_API_ENTER " -#~ "`\\" + +#~ msgid "options" +#~ msgstr "解决方案" #~ msgid "" -#~ ":py:obj:`RUN_FLEET_API_LEAVE " -#~ "`\\" +#~ "PyTorch | TensorFlow | sklearn | " +#~ "HuggingFace | JAX | MLX | NumPy" +#~ " | FlowerTune | Flower Baseline" #~ msgstr "" -#~ ":py:obj:`RUN_FLEET_API_LEAVE " -#~ "`\\" -#~ msgid ":py:obj:`DRIVER_CONNECT `\\" -#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" +#~ msgid "The Flower username of the author" +#~ msgstr "" -#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" -#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgid "The name of the Flower App" +#~ msgstr "基础镜像的存储库名称。" + +#~ msgid "Run Flower App." 
+#~ msgstr "Flower 服务器。" + +#~ msgid "Override configuration key-value pairs, should be of the format:" +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`START_DRIVER_ENTER " -#~ "`\\" +#~ "`--run-config 'key1=\"value1\" key2=\"value2\"' " +#~ "--run-config 'key3=\"value3\"'`" #~ msgstr "" -#~ ":py:obj:`START_DRIVER_ENTER " -#~ "`\\" #~ msgid "" -#~ ":py:obj:`START_DRIVER_LEAVE " -#~ "`\\" +#~ "Note that `key1`, `key2`, and `key3` " +#~ "in this example need to exist " +#~ "inside the `pyproject.toml` in order to" +#~ " be properly overriden." #~ msgstr "" -#~ ":py:obj:`START_DRIVER_LEAVE " -#~ "`\\" #~ msgid "" -#~ "An identifier that can be used " -#~ "when loading a particular data partition" -#~ " for a ClientApp. Making use of " -#~ "this identifier is more relevant when" -#~ " conducting simulations." -#~ msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" +#~ "Use `--stream` with `flwr run` to " +#~ "display logs; logs are not streamed " +#~ "by default." +#~ msgstr "" -#~ msgid ":py:obj:`partition_id `\\" -#~ msgstr ":py:obj:`partition_id `\\" +#~ msgid "``False``" +#~ msgstr "``FLWR_VERSION``" -#~ msgid "An identifier telling which data partition a ClientApp should use." -#~ msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" +#~ msgid "Path of the Flower App to run." +#~ msgstr "基础镜像的存储库名称。" -#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" -#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgid "Name of the federation to run the app on." +#~ msgstr "" -#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." -#~ msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" +#~ msgid "" +#~ "Note that since version ``1.11.0``, " +#~ "``flower-server-app`` no longer supports" +#~ " passing a reference to a `ServerApp`" +#~ " attribute. Instead, you need to pass" +#~ " the path to Flower app via the" +#~ " argument ``--app``. This is the path" +#~ " to a directory containing a " +#~ "`pyproject.toml`. 
You can create a valid" +#~ " Flower app by executing ``flwr new``" +#~ " and following the prompt." +#~ msgstr "" -#~ msgid "run\\_driver\\_api" -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ "A config (key/value mapping) held by " +#~ "the entity in a given run and " +#~ "that will stay local. It can be" +#~ " used at any point during the " +#~ "lifecycle of this entity (e.g. across" +#~ " multiple rounds)" +#~ msgstr "" -#~ msgid "run\\_fleet\\_api" -#~ msgstr "run\\_fleet\\_api" +#~ msgid "" +#~ ":py:obj:`RUN_SUPEREXEC_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_SUPERLINK_ENTER " +#~ "`\\" #~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg+ configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" +#~ ":py:obj:`RUN_SUPEREXEC_LEAVE " +#~ "`\\" #~ msgstr "" -#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ " -#~ "配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" +#~ ":py:obj:`RUN_SUPERLINK_LEAVE " +#~ "`\\" -#~ msgid "key shares." -#~ msgstr "关键股份。" +#~ msgid "Log error stating that module `ray` could not be imported." +#~ msgstr "" #~ msgid "" -#~ "The protocol involves four main stages:" -#~ " - 'setup': Send SecAgg configuration " -#~ "to clients and collect their public " -#~ "keys. - 'share keys': Broadcast public" -#~ " keys among clients and collect " -#~ "encrypted secret" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing JAX workload. " +#~ "We are using JAX to train a " +#~ "linear regression model on a scikit-" +#~ "learn dataset. We will structure the " +#~ "example similar to our `PyTorch - " +#~ "From Centralized To Federated " +#~ "`_ walkthrough. " +#~ "First, we build a centralized training" +#~ " approach based on the `Linear " +#~ "Regression with JAX " +#~ "`_" +#~ " tutorial`. 
Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" -#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg " -#~ "配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" +#~ "本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 " +#~ "JAX 在 scikit-learn 数据集上训练线性回归模型。我们将采用与 " +#~ "`PyTorch - 从集中式到联邦式 " +#~ "`_ " +#~ "教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " +#~ "`_" +#~ " 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" #~ msgid "" -#~ "'A dictionary, e.g {\"\": , " -#~ "\"\": } to configure a " -#~ "backend. Values supported in are" -#~ " those included by " -#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " ``jax``, ``jaxlib``, ``scikit-learn``, and" +#~ " ``flwr``:" #~ msgstr "" -#~ "字典,例如 {\"\": , \"\": " -#~ "} 来配置后端。 中支持的值是 " -#~ "`flwr.common.typing.ConfigsRecordValues`中包含的值。" +#~ "在开始构建 JAX 示例之前,我们需要安装软件包 " +#~ ":code:`jax`、:code:`jaxlib`、:code:`scikit-learn` 和 " +#~ ":code:`flwr`:" -#~ msgid "" -#~ "The total number of clients in " -#~ "this simulation. This must be set " -#~ "if `clients_ids` is not set and " -#~ "vice-versa." -#~ msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" +#~ msgid "Linear Regression with JAX" +#~ msgstr "使用 JAX 进行线性回归" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Convolutional Neural " -#~ "Network on CIFAR10 using Flower and " -#~ "PyTorch." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a ``Linear Regression`` model. If" +#~ " you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." 
+#~ msgstr "" +#~ "首先,我们将简要介绍基于 :code:`Linear Regression` " +#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `JAX 文档 " +#~ "`_。" #~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight-updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." -#~ msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" +#~ "Let's create a new file called " +#~ "``jax_training.py`` with all the components" +#~ " required for a traditional (centralized)" +#~ " linear regression training. First, the " +#~ "JAX packages ``jax`` and ``jaxlib`` need" +#~ " to be imported. In addition, we " +#~ "need to import ``sklearn`` since we " +#~ "use ``make_regression`` for the dataset " +#~ "and ``train_test_split`` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the ``flwr`` package " +#~ "for federated learning. This will be " +#~ "done later." +#~ msgstr "" +#~ "让我们创建一个名为 :code:`jax_training.py` " +#~ "的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 JAX 包 " +#~ ":code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 " +#~ ":code:`sklearn`,因为我们使用 :code:`make_regression` 创建数据集,并使用" +#~ " :code:`train_test_split` 将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的" +#~ " :code:`flwr` 软件包,这将在稍后完成。" #~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started. We first need to install " -#~ "Flower. You can do this by running" -#~ " :" -#~ msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" +#~ "The ``load_data()`` function loads the " +#~ "mentioned training and test sets." 
+#~ msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead and install PyTorch and " -#~ "the **torchvision** library:" -#~ msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" +#~ "The model architecture (a very simple" +#~ " ``Linear Regression`` model) is defined" +#~ " in ``load_model()``." +#~ msgstr "" +#~ "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 " +#~ ":code:`load_model()` 中定义。" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Deep Learning with" -#~ " PyTorch " -#~ "`_." +#~ "We now need to define the training" +#~ " (function ``train()``), which loops over" +#~ " the training set and measures the" +#~ " loss (function ``loss_fn()``) for each " +#~ "batch of training examples. The loss " +#~ "function is separate since JAX takes " +#~ "derivatives with a ``grad()`` function " +#~ "(defined in the ``main()`` function and" +#~ " called in ``train()``)." #~ msgstr "" -#~ "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " -#~ "PyTorch 的《Deep Learning with PyTorch " -#~ "`_》。" +#~ "现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " +#~ ":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` " +#~ "函数提取导数(在 :code:`main()` 函数中定义,并在 :code:`train()` " +#~ "中调用),因此损失函数是独立的。" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and PyTorch related " -#~ "packages:" -#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" +#~ "The evaluation of the model is " +#~ "defined in the function ``evaluation()``. " +#~ "The function takes all test examples " +#~ "and measures the loss of the " +#~ "linear regression model." 
+#~ msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" -#~ msgid "In addition, we define the device allocation in PyTorch with:" -#~ msgstr "此外,我们还在 PyTorch 中定义了设备分配:" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the ``jax.grad()`` function" +#~ " is defined in ``main()`` and passed" +#~ " to ``train()``." +#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " +#~ "训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` " +#~ "中定义,并传递给 :code:`train()`。" + +#~ msgid "You can now run your (centralized) JAX linear regression workload:" +#~ msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" #~ msgid "" -#~ "We use PyTorch to load CIFAR10, a" -#~ " popular colored image classification " -#~ "dataset for machine learning. The " -#~ "PyTorch :code:`DataLoader()` downloads the " -#~ "training and test data that are " -#~ "then normalized." +#~ "So far this should all look fairly" +#~ " familiar if you've used JAX before." +#~ " Let's take the next step and " +#~ "use what we've built to create a" +#~ " simple federated learning system " +#~ "consisting of one server and two " +#~ "clients." #~ msgstr "" -#~ "我们使用 PyTorch 来加载 " -#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " -#~ ":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" +#~ "到目前为止,如果你以前使用过 " +#~ "JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" + +#~ msgid "JAX meets Flower" +#~ msgstr "JAX 结合 Flower" #~ msgid "" -#~ "Define the loss and optimizer with " -#~ "PyTorch. The training of the dataset " -#~ "is done by looping over the " -#~ "dataset, measure the corresponding loss " -#~ "and optimize it." -#~ msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. 
We have to " +#~ "start a *server* and then use the" +#~ " code in ``jax_training.py`` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." +#~ msgstr "" +#~ "把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " +#~ ":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" #~ msgid "" -#~ "Define then the validation of the " -#~ "machine learning network. We loop over" -#~ " the test set and measure the " -#~ "loss and accuracy of the test set." -#~ msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" +#~ "Finally, we will define our *client* " +#~ "logic in ``client.py`` and build upon" +#~ " the previously defined JAX training " +#~ "in ``jax_training.py``. Our *client* needs " +#~ "to import ``flwr``, but also ``jax`` " +#~ "and ``jaxlib`` to update the parameters" +#~ " on our JAX model:" +#~ msgstr "" +#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " +#~ "逻辑,并以之前在 :code:`jax_training.py` 中定义的 JAX " +#~ "训练为基础。我们的 *client* 需要导入 :code:`flwr`,还需要导入 " +#~ ":code:`jax` 和 :code:`jaxlib` 以更新 JAX " +#~ "模型的参数:" #~ msgid "" -#~ "After defining the training and testing" -#~ " of a PyTorch machine learning model," -#~ " we use the functions for the " -#~ "Flower clients." -#~ msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " ``flwr.client.Client`` or ``flwr.client.NumPyClient``." +#~ " Our implementation will be based on" +#~ " ``flwr.client.NumPyClient`` and we'll call " +#~ "it ``FlowerClient``. 
``NumPyClient`` is " +#~ "slightly easier to implement than " +#~ "``Client`` if you use a framework " +#~ "with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. ``FlowerClient`` needs to implement" +#~ " four methods, two methods for " +#~ "getting/setting model parameters, one method" +#~ " for training the model, and one " +#~ "method for testing the model:" +#~ msgstr "" +#~ "实现一个 Flower *client*基本上意味着去实现一个 " +#~ ":code:`flwr.client.Client` 或 " +#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " +#~ ":code:`FlowerClient`。如果使用具有良好 NumPy 互操作性的框架(如 " +#~ "JAX),:code:`NumPyClient` 比 " +#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " +#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#~ msgid "" -#~ "The Flower clients will use a " -#~ "simple CNN adapted from 'PyTorch: A " -#~ "60 Minute Blitz':" -#~ msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" +#~ msgid "``set_parameters (optional)``" +#~ msgstr ":code:`set_parameters (可选)`" -#~ msgid "" -#~ "After loading the data set with " -#~ ":code:`load_data()` we define the Flower " -#~ "interface." -#~ msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" +#~ msgid "transform parameters to NumPy ``ndarray``'s" +#~ msgstr "将参数转换为 NumPy :code:`ndarray`格式" + +#~ msgid "get the updated local model parameters and return them to the server" +#~ msgstr "获取更新后的本地模型参数并返回服务器" + +#~ msgid "return the local loss to the server" +#~ msgstr "向服务器返回本地损失值" #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses " -#~ "PyTorch. 
Implementing :code:`NumPyClient` usually" -#~ " means defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ "``DeviceArray`` to ``NumPy ndarray`` to " +#~ "make them compatible with `NumPyClient`." +#~ msgstr "" +#~ "具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为" +#~ " :code:`NumPy ndarray`,使其与 `NumPyClient` 兼容。" + +#~ msgid "" +#~ "The two ``NumPyClient`` methods ``fit`` " +#~ "and ``evaluate`` make use of the " +#~ "functions ``train()`` and ``evaluate()`` " +#~ "previously defined in ``jax_training.py``. So" +#~ " what we really do here is we" +#~ " tell Flower through our ``NumPyClient``" +#~ " subclass which of our already " +#~ "defined functions to call for training" +#~ " and evaluation. We included type " +#~ "annotations to give you a better " +#~ "understanding of the data types that " +#~ "get passed around." #~ msgstr "" -#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" -#~ " PyTorch 时,它使 :code:`Client` 接口的实现变得更容易。实现 " -#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " -#~ "是可选的):" +#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " +#~ ":code:`evaluate` 使用了之前在 :code:`jax_training.py` " +#~ "中定义的函数 :code:`train()` 和 " +#~ ":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` " +#~ "子类告知 Flower 在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#~ msgid "which can be implemented in the following way:" -#~ msgstr "可以通过以下方式实现:" +#~ msgid "Having defined the federation process, we can run it." +#~ msgstr "定义了联邦进程后,我们就可以运行它了。" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your JAX " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" 
+ +#~ msgid "" +#~ "The source code of this example " +#~ "was improved over time and can be" +#~ " found here: `Quickstart JAX " #~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-" -#~ "pytorch`." +#~ "jax>`_. Our example is somewhat over-" +#~ "simplified because both clients load the" +#~ " same dataset." #~ msgstr "" -#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " #~ "`_ 可以在 :code:`examples/quickstart-" -#~ "pytorch` 中找到。" +#~ "jax>`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" #~ msgid "" -#~ "The :code:`self.bst` is used to keep " -#~ "the Booster objects that remain " -#~ "consistent across rounds, allowing them " -#~ "to store predictions from trees " -#~ "integrated in earlier rounds and " -#~ "maintain other essential data structures " -#~ "for training." -#~ msgstr "" -#~ "代码:`self.bst`用于保存在各轮中保持一致的 Booster " -#~ "对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" - -#~ msgid "Implementing a Flower client" -#~ msgstr "实现 Flower 客户端" +#~ "You're now prepared to explore this " +#~ "topic further. How about using a " +#~ "more sophisticated model or using a " +#~ "different dataset? How about adding more" +#~ " clients?" +#~ msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" #~ msgid "" -#~ "To implement the Flower client, we " -#~ "create a subclass of " -#~ "``flwr.client.NumPyClient`` and implement the " -#~ "three methods ``get_parameters``, ``fit``, and" -#~ " ``evaluate``:" +#~ "In this tutorial, we will learn " +#~ "how to train a ``Logistic Regression``" +#~ " model on MNIST using Flower and " +#~ "scikit-learn." 
#~ msgstr "" -#~ "为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` " -#~ "的子类,并实现了 ``get_parameters``、``fit`` 和``evaluate`` " -#~ "三个方法:" +#~ "在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在" +#~ " MNIST 上训练一个 :code:`Logistic Regression` " +#~ "模型。" #~ msgid "" -#~ "The function ``start_simulation`` accepts a" -#~ " number of arguments, amongst them " -#~ "the ``client_fn`` used to create " -#~ "``FlowerClient`` instances, the number of " -#~ "clients to simulate (``num_clients``), the " -#~ "number of federated learning rounds " -#~ "(``num_rounds``), and the strategy. The " -#~ "strategy encapsulates the federated learning" -#~ " approach/algorithm, for example, *Federated " -#~ "Averaging* (FedAvg)." -#~ msgstr "" -#~ "函数 ``start_simulation`` 接受许多参数,其中包括用于创建 " -#~ "``FlowerClient`` 实例的 " -#~ "``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" -#~ " (FedAvg)。" +#~ "Our example consists of one *server* " +#~ "and two *clients* all having the " +#~ "same model." +#~ msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" #~ msgid "" -#~ "The only thing left to do is " -#~ "to tell the strategy to call this" -#~ " function whenever it receives evaluation" -#~ " metric dictionaries from the clients:" -#~ msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" +#~ "*Clients* are responsible for generating " +#~ "individual model parameter updates for " +#~ "the model based on their local " +#~ "datasets. These updates are then sent" +#~ " to the *server* which will aggregate" +#~ " them to produce an updated global" +#~ " model. Finally, the *server* sends " +#~ "this improved version of the model " +#~ "back to each *client*. A complete " +#~ "cycle of parameters updates is called" +#~ " a *round*." 
+#~ msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" -#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" -#~ msgstr "" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by " +#~ "running:" +#~ msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" -#~ msgid "|01471150fd5144c080a176b43e92a3ff|" -#~ msgstr "" +#~ msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#~ msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" -#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" -#~ msgstr "" +#~ msgid "Or simply install all dependencies using Poetry:" +#~ msgstr "或者直接使用 Poetry 安装所有依赖项:" -#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within ``utils.py``. 
The " +#~ "``utils.py`` contains different functions " +#~ "defining all the machine learning " +#~ "basics:" #~ msgstr "" +#~ "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " +#~ ":code:`utils.py` " +#~ "中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" -#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" -#~ msgstr "" +#~ msgid "``get_model_parameters()``" +#~ msgstr ":code:`get_model_parameters()`" -#~ msgid "|c24c1478b30e4f74839208628a842d1e|" -#~ msgstr "" +#~ msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +#~ msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" -#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" -#~ msgstr "" +#~ msgid "``set_model_params()``" +#~ msgstr ":code:`set_model_params()`" -#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" -#~ msgstr "" +#~ msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +#~ msgstr "设置:code:`sklean`的LogisticRegression模型的参数" -#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -#~ msgstr "" +#~ msgid "``set_initial_params()``" +#~ msgstr ":code:`set_initial_params()`" -#~ msgid "|032eb6fed6924ac387b9f13854919196|" -#~ msgstr "" +#~ msgid "Initializes the model parameters that the Flower server will ask for" +#~ msgstr "初始化 Flower 服务器将要求的模型参数" -#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgid "" +#~ "Please check out ``utils.py`` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the ``client.py`` and imported. 
The" +#~ " ``client.py`` also requires to import " +#~ "several packages such as Flower and " +#~ "scikit-learn:" #~ msgstr "" +#~ "更多详情请查看 :code:`utils.py`` 这里 " +#~ "`_。在 :code:`client.py` " +#~ "中使用并导入了预定义函数。:code:`client.py` 还需要导入几个软件包,如 Flower 和" +#~ " scikit-learn:" -#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The ``FederatedDataset.load_partition()`` method " +#~ "loads the partitioned training set for" +#~ " each partition ID defined in the " +#~ "``--partition-id`` argument." #~ msgstr "" +#~ "在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行" +#~ " FL 分区。使用 \"Flower Datasets " +#~ "`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" +#~ " 方法为 :code:`--partition-id` 参数中定义的每个分区 ID" +#~ " 加载分区训练集。" -#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ "``utils.set_initial_params()``." +#~ msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called ``Client``." +#~ " When the server selects a particular" +#~ " client for training, it sends " +#~ "training instructions over the network. " +#~ "The client receives those instructions " +#~ "and calls one of the ``Client`` " +#~ "methods to run your code (i.e., to" +#~ " fit the logistic regression we " +#~ "defined earlier)." 
#~ msgstr "" +#~ "Flower 服务器通过一个名为 :code:`Client` " +#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " +#~ ":code:`Client` 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" -#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called ``NumPyClient`` which makes it " +#~ "easier to implement the ``Client`` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing ``NumPyClient`` " +#~ "usually means defining the following " +#~ "methods (``set_parameters`` is optional " +#~ "though):" #~ msgstr "" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用" +#~ " scikit-learn 时,它可以让你更容易地实现 :code:`Client` " +#~ "接口。实现 :code:`NumPyClient` " +#~ "通常意味着定义以下方法(:code:`set_parameters` 是可选的):" -#~ msgid "run\\_client\\_app" -#~ msgstr "run\\_client\\_app" +#~ msgid "return the model weight as a list of NumPy ndarrays" +#~ msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#~ msgid "run\\_supernode" -#~ msgstr "flower-superlink" +#~ msgid "``set_parameters`` (optional)" +#~ msgstr ":code:`set_parameters` (可选)" -#~ msgid "Retrieve the corresponding layout by the string key." -#~ msgstr "" +#~ msgid "" +#~ "update the local model weights with " +#~ "the parameters received from the server" +#~ msgstr "用从服务器接收到的参数更新本地模型参数" + +#~ msgid "is directly imported with ``utils.set_model_params()``" +#~ msgstr "直接导入 :code:`utils.set_model_params()`" + +#~ msgid "set the local model weights" +#~ msgstr "设置本地模型参数" + +#~ msgid "train the local model" +#~ msgstr "训练本地模型" + +#~ msgid "return the updated local model weights" +#~ msgstr "接收更新的本地模型参数" + +#~ msgid "test the local model" +#~ msgstr "测试本地模型" + +#~ msgid "The methods can be implemented in the following way:" +#~ msgstr "这些方法可以通过以下方式实现:" #~ msgid "" -#~ "When there isn't an exact match, " -#~ "all the existing keys in the " -#~ "layout map will be treated as a" -#~ " regex and map against the input " -#~ "key again. 
The first match will be" -#~ " returned, based on the key insertion" -#~ " order. Return None if there isn't" -#~ " any match found." -#~ msgstr "" +#~ "We can now create an instance of" +#~ " our class ``MnistClient`` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" -#~ msgid "the string key as the query for the layout." +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement ``Client`` or " +#~ "``NumPyClient`` and call " +#~ "``fl.client.start_client()``. If you implement " +#~ "a client of type ``NumPyClient`` you'll" +#~ " need to first call its " +#~ "``to_client()`` method. The string " +#~ "``\"0.0.0.0:8080\"`` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use ``\"0.0.0.0:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we pass to the client." #~ msgstr "" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" +#~ " " +#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ " :code:`server_address`。" -#~ msgid "Corresponding layout based on the query." -#~ msgstr "" +#~ msgid "" +#~ "The following Flower server is a " +#~ "little bit more advanced and returns " +#~ "an evaluation function for the " +#~ "server-side evaluation. First, we import" +#~ " again all required libraries such as" +#~ " Flower and scikit-learn." 
+#~ msgstr "" +#~ "下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如" +#~ " Flower 和 scikit-learn。" + +#~ msgid "``server.py``, import Flower and start the server:" +#~ msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in ``fit_round()`` and the" +#~ " evaluation is defined in " +#~ "``get_evaluate_fn()``. The evaluation function " +#~ "is called after each federated learning" +#~ " round and gives you information " +#~ "about loss and accuracy. Note that " +#~ "we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" +#~ "联邦学习轮数在 :code:`fit_round()` 中设置,评估在 " +#~ ":code:`get_evaluate_fn()` 中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" + +#~ msgid "" +#~ "The ``main`` contains the server-side" +#~ " parameter initialization " +#~ "``utils.set_initial_params()`` as well as the" +#~ " aggregation strategy ``fl.server.strategy:FedAvg()``." +#~ " The strategy is the default one, " +#~ "federated averaging (or FedAvg), with " +#~ "two clients and evaluation after each" +#~ " federated learning round. The server " +#~ "can be started with the command " +#~ "``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))``." +#~ msgstr "" +#~ ":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " +#~ ":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " +#~ "FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`" +#~ " 命令启动服务器。" -#~ msgid "run\\_server\\_app" -#~ msgstr "run\\_server\\_app" +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. 
We, " +#~ "therefore, have to start the server " +#~ "first:" +#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" -#~ msgid "run\\_superlink" -#~ msgstr "flower-superlink" +#~ msgid "" +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Open a new terminal and " +#~ "start the first client:" +#~ msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" -#~ msgid "Start a Ray-based Flower simulation server." -#~ msgstr "启动基于 Ray 的Flower模拟服务器。" +#~ msgid "Open another terminal and start the second client:" +#~ msgstr "打开另一台终端,启动第二个客户端:" #~ msgid "" -#~ "A function creating `Client` instances. " -#~ "The function must have the signature " -#~ "`client_fn(context: Context). It should return" -#~ " a single client instance of type " -#~ "`Client`. Note that the created client" -#~ " instances are ephemeral and will " -#~ "often be destroyed after a single " -#~ "method invocation. Since client instances " -#~ "are not long-lived, they should " -#~ "not attempt to carry state over " -#~ "method invocations. Any state required " -#~ "by the instance (model, dataset, " -#~ "hyperparameters, ...) should be (re-)created" -#~ " in either the call to `client_fn`" -#~ " or the call to any of the " -#~ "client methods (e.g., load evaluation " -#~ "data in the `evaluate` method itself)." +#~ "Each client will have its own " +#~ "dataset. You should now see how " +#~ "the training does in the very " +#~ "first terminal (the one that started " +#~ "the server):" +#~ msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in ``examples/sklearn-logreg-" +#~ "mnist``." 
#~ msgstr "" -#~ "创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个" -#~ " Client " -#~ "类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" -#~ " `client_fn` 或任何客户端方法(例如,在 `evaluate` " -#~ "方法中加载评估数据)时(重新)创建。" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可以在 :code:`examples/sklearn-" +#~ "logreg-mnist` 中找到。" -#~ msgid "The total number of clients in this simulation." -#~ msgstr "需要等待的客户数量。" +#~ msgid "Federated XGBoost" +#~ msgstr "联邦化 XGBoost" #~ msgid "" -#~ "UNSUPPORTED, WILL BE REMOVED. USE " -#~ "`num_clients` INSTEAD. List `client_id`s for" -#~ " each client. This is only required" -#~ " if `num_clients` is not set. Setting" -#~ " both `num_clients` and `clients_ids` with" -#~ " `len(clients_ids)` not equal to " -#~ "`num_clients` generates an error. Using " -#~ "this argument will raise an error." +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a :doc:`virtualenv " +#~ "`." #~ msgstr "" -#~ "列出每个客户的 `client_id`。只有在未设置 `num_clients` " -#~ "时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" +#~ "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" #~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. To understand the GPU" -#~ " utilization caused by `num_gpus`, as " -#~ "well as using custom resources, please" -#~ " consult the Ray documentation." -#~ msgstr "" -#~ "\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU " -#~ "资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解 " -#~ "`num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray" -#~ " 文档。" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. Now" +#~ " that we have all our dependencies" +#~ " installed, let's run a simple " +#~ "distributed training with two clients " +#~ "and one server." 
+#~ msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" + +#~ msgid "" +#~ "In a file called ``client.py``, import" +#~ " xgboost, Flower, Flower Datasets and " +#~ "other related functions:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" + +#~ msgid "Dataset partition and hyper-parameter selection" +#~ msgstr "数据集划分和超参数选择" + +#~ msgid "" +#~ "Prior to local training, we require " +#~ "loading the HIGGS dataset from Flower" +#~ " Datasets and conduct data partitioning " +#~ "for FL:" +#~ msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" + +#~ msgid "Finally, we define the hyper-parameters used for XGBoost training." +#~ msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Server`. If no instance" -#~ " is provided, then `start_server` will " -#~ "create one." -#~ msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" +#~ msgid "Flower client definition for XGBoost" +#~ msgstr "用于 XGBoost 的 Flower 客户端定义" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ "All required parameters defined above " +#~ "are passed to ``XgbClient``'s constructor." #~ msgstr "" -#~ "抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server`" -#~ " 将使用 `flwr.server.strategy.FedAvg`。" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_simulation` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use ``get_parameters`` and" +#~ " ``set_parameters`` to initialise model " +#~ "parameters for XGBoost. 
As a result, " +#~ "let's return an empty tensor in " +#~ "``get_parameters`` when it is called by" +#~ " the server at the first round." #~ msgstr "" -#~ "抽象基类 `flwr.server.ClientManager` " -#~ "的实现。如果没有提供实现,`start_simulation` 将使用 " -#~ "`flwr.server.client_manager.SimpleClientManager`。" +#~ "与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 " +#~ ":code:`get_parameters` 和 :code:`set_parameters` 来初始化" +#~ " XGBoost 的模型参数。因此,当服务器在第一轮调用 :code:`get_parameters` " +#~ "时,让我们在 :code:`get_parameters` 中返回一个空张量。" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } An empty dictionary can " -#~ "be used (ray_init_args={}) to prevent " -#~ "any arguments from being passed to " -#~ "ray.init." +#~ "Now, we can create an instance of" +#~ " our class ``XgbClient`` and add one" +#~ " line to actually run this client:" +#~ msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement ``Client`` and " +#~ "call ``fl.client.start_client()``. The string " +#~ "``\"[::]:8080\"`` tells the client which " +#~ "server to connect to. In our case" +#~ " we can run the server and the" +#~ " client on the same machine, " +#~ "therefore we use ``\"[::]:8080\"``. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the ``server_address`` " +#~ "we point the client at." 
#~ msgstr "" -#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" -#~ " None(默认值),则将使用以下默认参数初始化 Ray: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } 可以使用空字典(ray_init_args={})来防止向 ray.init " -#~ "传递任何参数。" +#~ "这就是客户端。我们只需实现 :code:`客户端`并调用 " +#~ ":code:`fl.client.start_client()`。字符串 " +#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" +#~ " " +#~ ":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" +#~ " :code:`server_address`。" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args:" +#~ "In a file named ``server.py``, import" +#~ " Flower and FedXgbBagging from " +#~ "``flwr.server.strategy``." #~ msgstr "" -#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" -#~ " None(默认值),则将使用以下默认参数初始化 Ray:" +#~ "在名为 :code:`server.py` 的文件中,从 " +#~ ":code:`flwr.server.strategy` 导入 Flower 和 " +#~ "FedXgbBagging。" -#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -#~ msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgid "Then, we start the server:" +#~ msgstr "然后,我们启动服务器:" #~ msgid "" -#~ "An empty dictionary can be used " -#~ "(ray_init_args={}) to prevent any arguments" -#~ " from being passed to ray.init." -#~ msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" +#~ "We also provide an example code " +#~ "(``sim.py``) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." +#~ msgstr "" +#~ "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower " +#~ "的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" #~ msgid "" -#~ "Set to True to prevent `ray.shutdown()`" -#~ " in case `ray.is_initialized()=True`." 
-#~ msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" +#~ "After importing all required packages, " +#~ "we define a ``main()`` function to " +#~ "perform the simulation process:" +#~ msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "executing a ClientApp wrapping input " -#~ "argument `client_fn`." -#~ msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ "``list``. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." +#~ msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" + +#~ msgid "Then, we define the strategies and other hyper-parameters:" +#~ msgstr "然后,我们定义策略和其他超参数:" #~ msgid "" -#~ "If you want to create your own " -#~ "Actor classes, you might need to " -#~ "pass some input argument. You can " -#~ "use this dictionary for such purpose." -#~ msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" +#~ "After that, we start the simulation " +#~ "by calling ``fl.simulation.start_simulation``:" +#~ msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" #~ msgid "" -#~ "(default: \"DEFAULT\") Optional string " -#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" -#~ " to choose in which node the " -#~ "actor is placed. If you are an " -#~ "advanced user needed more control you" -#~ " can use lower-level scheduling " -#~ "strategies to pin actors to specific " -#~ "compute nodes (e.g. via " -#~ "NodeAffinitySchedulingStrategy). Please note this" -#~ " is an advanced feature. 
For all " -#~ "details, please refer to the Ray " -#~ "documentation: https://docs.ray.io/en/latest/ray-" -#~ "core/scheduling/index.html" +#~ "One of key parameters for " +#~ "``start_simulation`` is ``client_fn`` which " +#~ "returns a function to construct a " +#~ "client. We define it as follows:" #~ msgstr "" -#~ "(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 " -#~ "VCE " -#~ "选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " -#~ "NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " -#~ "文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +#~ ":code:`start_simulation` 的一个关键参数是 " +#~ ":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" -#~ msgid "**hist** -- Object containing metrics from training." -#~ msgstr "**hist** -- 包含训练指标的对象。" +#~ msgid "" +#~ "In ``utils.py``, we define the arguments" +#~ " parsers for clients, server and " +#~ "simulation, allowing users to specify " +#~ "different experimental settings. Let's first" +#~ " see the sever side:" +#~ msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" + +#~ msgid "Then, the argument parser on client side:" +#~ msgstr "然后是客户端的参数解析器:" + +#~ msgid "We also have an argument parser for simulation:" +#~ msgstr "我们还有一个用于模拟的参数解析器:" + +#~ msgid "This integrates all arguments for both client and server sides." +#~ msgstr "这整合了客户端和服务器端的所有参数。" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with FastAI to train a vision " -#~ "model on CIFAR-10." -#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" +#~ "To run a centralised evaluated " +#~ "experiment with bagging strategy on 5" +#~ " clients with exponential distribution for" +#~ " 50 rounds, we first start the " +#~ "server as below:" +#~ msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" -#~ msgid "Let's build a federated learning system using fastai and Flower!" -#~ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" 
+#~ msgid "Then, on each client terminal, we start the clients:" +#~ msgstr "然后,我们在每个客户终端上启动客户机:" + +#~ msgid "To run the same experiment with Flower simulation:" +#~ msgstr "运行与 Flower 模拟相同的实验:" + +#~ msgid "|ac0a9766e26044d6aea222a829859b20|" +#~ msgstr "" + +#~ msgid "|36cd6e248b1443ce8a82b5a025bba368|" +#~ msgstr "" + +#~ msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +#~ msgstr "" + +#~ msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +#~ msgstr "" + +#~ msgid "|7605632e1b0f49599ffacf841491fcfb|" +#~ msgstr "" + +#~ msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +#~ msgstr "" + +#~ msgid "|5405ed430e4746e28b083b146fb71731|" +#~ msgstr "" + +#~ msgid "|a389e87dab394eb48a8949aa2397687b|" +#~ msgstr "" + +#~ msgid "|89c412136a5146ec8dc32c0973729f12|" +#~ msgstr "" + +#~ msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +#~ msgstr "" + +#~ msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +#~ msgstr "" + +#~ msgid "|a7579ad7734347508e959d9e14f2f53d|" +#~ msgstr "" + +#~ msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +#~ msgstr "" + +#~ msgid "|55472eef61274ba1b739408607e109df|" +#~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn more." +#~ "When operating in a production " +#~ "environment, it is strongly recommended " +#~ "to enable Transport Layer Security (TLS)" +#~ " for each Flower Component to ensure" +#~ " secure communication." #~ msgstr "" -#~ "请参阅 `完整代码示例 " -#~ "`_了解更多信息。" #~ msgid "" -#~ "Let's build a federated learning system" -#~ " using Hugging Face Transformers and " -#~ "Flower!" -#~ msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" 
+#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container:" +#~ msgstr "" -#~ msgid "Dependencies" -#~ msgstr "依赖关系" +#~ msgid "" +#~ "``--volume ./certificates/:/app/certificates/:ro``: Mount" +#~ " the ``certificates`` directory in" +#~ msgstr "" #~ msgid "" -#~ "To follow along this tutorial you " -#~ "will need to install the following " -#~ "packages: :code:`datasets`, :code:`evaluate`, " -#~ ":code:`flwr`, :code:`torch`, and " -#~ ":code:`transformers`. This can be done " -#~ "using :code:`pip`:" +#~ "the current working directory of the " +#~ "host machine as a read-only volume" +#~ " at the" #~ msgstr "" -#~ "要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 " -#~ ":code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过" -#~ " :code:`pip` 来完成:" -#~ msgid "Standard Hugging Face workflow" -#~ msgstr "标准Hugging Face工作流程" +#~ msgid "``/app/certificates`` directory inside the container." +#~ msgstr "" -#~ msgid "Handling the data" -#~ msgstr "处理数据" +#~ msgid "" +#~ "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the " +#~ "``ca.crt`` file from the" +#~ msgstr "" #~ msgid "" -#~ "To fetch the IMDB dataset, we will" -#~ " use Hugging Face's :code:`datasets` " -#~ "library. 
We then need to tokenize " -#~ "the data and create :code:`PyTorch` " -#~ "dataloaders, this is all done in " -#~ "the :code:`load_data` function:" +#~ "current working directory of the host" +#~ " machine as a read-only volume " +#~ "at the ``/app/ca.crt``" #~ msgstr "" -#~ "为了获取 IMDB 数据集,我们将使用 Hugging Face 的 " -#~ ":code:`datasets` 库。然后,我们需要对数据进行标记化,并创建 :code:`PyTorch` " -#~ "数据加载器,这些都将在 :code:`load_data` 函数中完成:" -#~ msgid "Training and testing the model" -#~ msgstr "训练和测试模型" +#~ msgid "SuperExec" +#~ msgstr "" #~ msgid "" -#~ "Once we have a way of creating " -#~ "our trainloader and testloader, we can" -#~ " take care of the training and " -#~ "testing. This is very similar to " -#~ "any :code:`PyTorch` training or testing " -#~ "loop:" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory where" +#~ " the SuperExec will be executed from," +#~ " we can use the flag ``--volume`` " +#~ "to mount the local directory into " +#~ "the ``/app/certificates/`` directory of the" +#~ " container:" #~ msgstr "" -#~ "有了创建 trainloader 和 testloader " -#~ "的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` 训练或测试循环都非常相似:" - -#~ msgid "Creating the model itself" -#~ msgstr "创建模型本身" #~ msgid "" -#~ "To create the model itself, we " -#~ "will just load the pre-trained " -#~ "distillBERT model using Hugging Face’s " -#~ ":code:`AutoModelForSequenceClassification` :" +#~ ":substitution-code:`flwr/superexec:|stable_flwr_version|`: " +#~ "The name of the image to be " +#~ "run and the specific" #~ msgstr "" -#~ "要创建模型本身,我们只需使用 Hugging Face 的 " -#~ ":code:`AutoModelForSequenceClassification` 加载预训练的 " -#~ "distillBERT 模型:" -#~ msgid "Creating the IMDBClient" -#~ msgstr "创建 IMDBClient" +#~ msgid "SuperExec." +#~ msgstr "" #~ msgid "" -#~ "To federate our example to multiple " -#~ "clients, we first need to write " -#~ "our Flower client class (inheriting from" -#~ " :code:`flwr.client.NumPyClient`). 
This is very" -#~ " easy, as our model is a " -#~ "standard :code:`PyTorch` model:" +#~ "``--ssl-certfile certificates/server.pem``: Specify" +#~ " the location of the SuperExec's" #~ msgstr "" -#~ "要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " -#~ ":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 " -#~ ":code:`PyTorch` 模型:" #~ msgid "" -#~ "The :code:`get_parameters` function lets the" -#~ " server get the client's parameters. " -#~ "Inversely, the :code:`set_parameters` function " -#~ "allows the server to send its " -#~ "parameters to the client. Finally, the" -#~ " :code:`fit` function trains the model " -#~ "locally for the client, and the " -#~ ":code:`evaluate` function tests the model " -#~ "locally and returns the relevant " -#~ "metrics." +#~ "The ``certificates/server.pem`` file is used" +#~ " to identify the SuperExec and to " +#~ "encrypt the" #~ msgstr "" -#~ ":code:`get_parameters` " -#~ "函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" -#~ msgid "Starting the server" -#~ msgstr "启动服务器" +#~ msgid "" +#~ "``--ssl-keyfile certificates/server.key``: Specify" +#~ " the location of the SuperExec's" +#~ msgstr "" #~ msgid "" -#~ "Now that we have a way to " -#~ "instantiate clients, we need to create" -#~ " our server in order to aggregate " -#~ "the results. 
Using Flower, this can " -#~ "be done very easily by first " -#~ "choosing a strategy (here, we are " -#~ "using :code:`FedAvg`, which will define " -#~ "the global weights as the average " -#~ "of all the clients' weights at " -#~ "each round) and then using the " -#~ ":code:`flwr.server.start_server` function:" +#~ "``--executor-config root-" +#~ "certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify" +#~ " the" #~ msgstr "" -#~ "现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " -#~ ":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " -#~ ":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" #~ msgid "" -#~ "The :code:`weighted_average` function is there" -#~ " to provide a way to aggregate " -#~ "the metrics distributed amongst the " -#~ "clients (basically this allows us to " -#~ "display a nice average accuracy and " -#~ "loss for every round)." +#~ "location of the CA certificate file " +#~ "inside the container that the SuperExec" +#~ " executor" #~ msgstr "" -#~ "使用 :code:`weighted_average` " -#~ "函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" -#~ msgid "Putting everything together" -#~ msgstr "把所有东西放在一起" +#~ msgid "should use to verify the SuperLink's identity." +#~ msgstr "" -#~ msgid "We can now start client instances using:" -#~ msgstr "现在我们可以使用:" +#~ msgid "" +#~ "In this mode, the ClientApp is " +#~ "executed as a subprocess within the " +#~ "SuperNode Docker container, rather than " +#~ "running in a separate container. This" +#~ " approach reduces the number of " +#~ "running containers, which can be " +#~ "beneficial for environments with limited " +#~ "resources. However, it also means that" +#~ " the ClientApp is no longer isolated" +#~ " from the SuperNode, which may " +#~ "introduce additional security concerns." +#~ msgstr "" #~ msgid "" -#~ "And they will be able to connect" -#~ " to the server and start the " -#~ "federated training." 
-#~ msgstr "他们就能连接到服务器,开始联邦训练。" +#~ "Before running the ClientApp as a " +#~ "subprocess, ensure that the FAB " +#~ "dependencies have been installed in the" +#~ " SuperNode images. This can be done" +#~ " by extending the SuperNode image:" +#~ msgstr "" + +#~ msgid "Dockerfile.supernode" +#~ msgstr "Flower 服务器" + +#~ msgid "Run the ClientApp as a Subprocess" +#~ msgstr "" #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the `full code example " -#~ "`_ ." +#~ "Start the SuperNode with the flag " +#~ "``--isolation subprocess``, which tells the" +#~ " SuperNode to execute the ClientApp " +#~ "as a subprocess:" +#~ msgstr "" + +#~ msgid "Run the example and follow the logs of the ServerApp:" #~ msgstr "" -#~ "如果您想查看所有内容,请查看完整的代码示例: " -#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" -#~ "huggingface](https://github.com/adap/flower/tree/main/examples" -#~ "/quickstart-huggingface)." #~ msgid "" -#~ "Of course, this is a very basic" -#~ " example, and a lot can be " -#~ "added or modified, it was just to" -#~ " showcase how simply we could " -#~ "federate a Hugging Face workflow using" -#~ " Flower." +#~ "That is all it takes! You can " +#~ "monitor the progress of the run " +#~ "through the logs of the SuperExec." #~ msgstr "" -#~ "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower " -#~ "联合Hugging Face的工作流程。" #~ msgid "" -#~ "Note that in this example we used" -#~ " :code:`PyTorch`, but we could have " -#~ "very well used :code:`TensorFlow`." -#~ msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" +#~ "You will learn how to run the " +#~ "Flower client and server components on" +#~ " two separate machines, with Flower " +#~ "configured to use TLS encryption and " +#~ "persist SuperLink state across restarts. " +#~ "A server consists of a SuperLink " +#~ "and ``SuperExec``. 
For more details " +#~ "about the Flower architecture, refer to" +#~ " the :doc:`../explanation-flower-architecture`" +#~ " explainer page." +#~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with PyTorch Lightning to train an " -#~ "Auto Encoder model on MNIST." -#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" +#~ "First, set the environment variables " +#~ "``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the" +#~ " IP address from the remote machine." +#~ " For example, if the IP is " +#~ "``192.168.2.33``, execute:" +#~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using PyTorch Lightning " -#~ "and Flower!" -#~ msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" +#~ "Log into the remote machine using " +#~ "``ssh`` and run the following command" +#~ " to start the SuperLink and SuperExec" +#~ " services:" +#~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "Specify the remote SuperExec IP " +#~ "addresses and the path to the root" +#~ " certificate in the ``[tool.flwr.federations" +#~ ".remote-superexec]`` table in the " +#~ "``pyproject.toml`` file. Here, we have " +#~ "named our remote federation ``remote-" +#~ "superexec``:" #~ msgstr "" -#~ "请参阅 `完整代码示例 " -#~ "`_ 了解更多信息。" - -#~ msgid "Let's build a federated learning system in less than 20 lines of code!" -#~ msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" 
-#~ msgid "Before Flower can be imported we have to install it:" -#~ msgstr "在导入 Flower 之前,我们必须先安装它:" +#~ msgid "Run the project and follow the ServerApp logs:" +#~ msgstr "" #~ msgid "" -#~ "Since we want to use the Keras " -#~ "API of TensorFlow (TF), we have to" -#~ " install TF as well:" -#~ msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" +#~ "``-p 9091:9091 -p 9092:9092``: Map port" +#~ " ``9091`` and ``9092`` of the " +#~ "container to the same port of" +#~ msgstr "" -#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -#~ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" +#~ msgid "the host machine, allowing other services to access the Driver API on" +#~ msgstr "" #~ msgid "" -#~ "We use the Keras utilities of TF" -#~ " to load CIFAR10, a popular colored" -#~ " image classification dataset for machine" -#~ " learning. The call to " -#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " -#~ "CIFAR10, caches it locally, and then " -#~ "returns the entire training and test " -#~ "set as NumPy ndarrays." +#~ "``http://localhost:9091`` and the Fleet API" +#~ " on ``http://localhost:9092``." #~ msgstr "" -#~ "我们使用 TF 的 Keras 实用程序加载 " -#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " -#~ ":code:`tf.keras.datasets.cifar10.load_data()` 会下载 " -#~ "CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" #~ msgid "" -#~ "Next, we need a model. For the " -#~ "purpose of this tutorial, we use " -#~ "MobilNetV2 with 10 output classes:" -#~ msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" +#~ "``flwr/supernode:|stable_flwr_version|``: This is " +#~ "the name of the image to be " +#~ "run and the specific tag" +#~ msgstr "" #~ msgid "" -#~ "The Flower server interacts with clients" -#~ " through an interface called " -#~ ":code:`Client`. When the server selects " -#~ "a particular client for training, it " -#~ "sends training instructions over the " -#~ "network. 
The client receives those " -#~ "instructions and calls one of the " -#~ ":code:`Client` methods to run your code" -#~ " (i.e., to train the neural network" -#~ " we defined earlier)." +#~ "``--supernode-address 0.0.0.0:9094``: Set the" +#~ " address and port number that the " +#~ "SuperNode" #~ msgstr "" -#~ "Flower 服务器通过一个名为 :code:`Client` " -#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " -#~ ":code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" -#~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses Keras." -#~ " The :code:`NumPyClient` interface defines " -#~ "three methods which can be implemented" -#~ " in the following way:" +#~ msgid "is listening on." +#~ msgstr "" + +#~ msgid "Step 4: Start the ClientApp" #~ msgstr "" -#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" -#~ " Keras 时,该类可以更轻松地实现 :code:`Client` " -#~ "接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`CifarClient` and add " -#~ "one line to actually run this " -#~ "client:" -#~ msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" +#~ "The ClientApp Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own ClientApp image. In" +#~ " order to install the FAB " +#~ "dependencies, you will need to create" +#~ " a Dockerfile that extends the " +#~ "ClientApp image and installs the " +#~ "required dependencies." +#~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. If you implement" -#~ " a client of type :code:`NumPyClient` " -#~ "you'll need to first call its " -#~ ":code:`to_client()` method. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. 
In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "Create a ClientApp Dockerfile called " +#~ "``Dockerfile.clientapp`` and paste the " +#~ "following code into it:" #~ msgstr "" -#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" -#~ " 并调用 :code:`fl.client.start_client()` 或 " -#~ ":code:`fl.client.start_numpy_client()`。字符串 " -#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " -#~ ":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" -#~ " :code:`server_address`。" -#~ msgid "Each client will have its own dataset." -#~ msgstr "每个客户都有自己的数据集。" +#~ msgid "Dockerfile.clientapp" +#~ msgstr "Flower 客户端。" #~ msgid "" -#~ "You should now see how the " -#~ "training does in the very first " -#~ "terminal (the one that started the " -#~ "server):" -#~ msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" +#~ "to be built from is the " +#~ "``flwr/clientapp image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." +#~ msgstr "" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this can be " -#~ "found in :code:`examples/quickstart-" -#~ "tensorflow/client.py`." +#~ "``--supernode supernode-1:9094``: Connect to " +#~ "the SuperNode's Fleet API at the " +#~ "address" #~ msgstr "" -#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " -#~ "`_ 可以在 :code:`examples/quickstart-" -#~ "tensorflow/client.py` 中找到。" -#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgid "``supernode-1:9094``." 
#~ msgstr "" -#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgid "" +#~ "Similar to the ClientApp image, you " +#~ "will need to create a Dockerfile " +#~ "that extends the SuperExec image and " +#~ "installs the required FAB dependencies." #~ msgstr "" -#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgid "" +#~ "Create a SuperExec Dockerfile called " +#~ "``Dockerfile.superexec`` and paste the " +#~ "following code in:" #~ msgstr "" -#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgid "Dockerfile.superexec" #~ msgstr "" -#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgid "" +#~ ":substitution-code:`FROM " +#~ "flwr/superexec:|stable_flwr_version|`: This line " +#~ "specifies that the Docker image" #~ msgstr "" -#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgid "" +#~ "to be built from is the " +#~ "``flwr/superexec image``, version :substitution-" +#~ "code:`|stable_flwr_version|`." #~ msgstr "" -#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgid "" +#~ "``ENTRYPOINT [\"flower-superexec\"``: Set the" +#~ " command ``flower-superexec`` to be" #~ msgstr "" -#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" #~ msgstr "" -#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." #~ msgstr "" -#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgid "" +#~ "Afterward, in the directory that holds" +#~ " the Dockerfile, execute this Docker " +#~ "command to build the SuperExec image:" #~ msgstr "" -#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgid "" +#~ "``-p 9093:9093``: Map port ``9093`` of" +#~ " the container to the same port " +#~ "of" #~ msgstr "" -#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgid "" +#~ "the host machine, allowing you to " +#~ "access the SuperExec API on " +#~ "``http://localhost:9093``." 
#~ msgstr "" -#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgid "``--name superexec``: Assign the name ``superexec`` to the container." #~ msgstr "" -#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgid "" +#~ "``--executor-config superlink=\\\"superlink:9091\\\"``:" +#~ " Configure the SuperExec executor to" #~ msgstr "" -#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgid "connect to the SuperLink running on port ``9091``." #~ msgstr "" -#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgid "Launch two new ClientApp containers based on the newly built image:" #~ msgstr "" -#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgid "" +#~ "Setting the ``PROJECT_DIR`` helps Docker " +#~ "Compose locate the ``pyproject.toml`` file," +#~ " allowing it to install dependencies " +#~ "in the SuperExec and SuperNode images" +#~ " correctly." #~ msgstr "" -#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgid "" +#~ "To ensure the ``flwr`` CLI connects " +#~ "to the SuperExec, you need to " +#~ "specify the SuperExec addresses in the" +#~ " ``pyproject.toml`` file." #~ msgstr "" -#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgid "" +#~ "Run the quickstart example, monitor the" +#~ " ServerApp logs and wait for the " +#~ "summary to appear:" #~ msgstr "" -#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgid "In the SuperExec logs, you should find the ``Get weights`` line:" #~ msgstr "" -#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgid "Step 7: Add another SuperNode" #~ msgstr "" -#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgid "" +#~ "You can add more SuperNodes and " +#~ "ClientApps by duplicating their definitions" +#~ " in the ``compose.yml`` file." #~ msgstr "" -#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgid "" +#~ "Just give each new SuperNode and " +#~ "ClientApp service a unique service name" +#~ " like ``supernode-3``, ``clientapp-3``, etc." 
#~ msgstr "" -#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgid "In ``compose.yml``, add the following:" #~ msgstr "" -#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgid "" +#~ "If you also want to enable TLS " +#~ "for the new SuperNodes, duplicate the" +#~ " SuperNode definition for each new " +#~ "SuperNode service in the ``with-" +#~ "tls.yml`` file." #~ msgstr "" -#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgid "" +#~ "Make sure that the names of the" +#~ " services match with the one in " +#~ "the ``compose.yml`` file." #~ msgstr "" -#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgid "In ``with-tls.yml``, add the following:" #~ msgstr "" -#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" #~ msgstr "" #~ msgid "" -#~ "Some quickstart examples may have " -#~ "limitations or requirements that prevent " -#~ "them from running on every environment." -#~ " For more information, please see " -#~ "`Limitations`_." +#~ "This guide is for users who have" +#~ " already worked with Flower 0.x and" +#~ " want to upgrade to Flower 1.0. " +#~ "Newer versions of Flower (1.12+) are " +#~ "based on a new architecture (previously" +#~ " called Flower Next) and not covered" +#~ " in this guide. After upgrading " +#~ "Flower 0.x projects to Flower 1.0, " +#~ "please refer to :doc:`Upgrade to Flower" +#~ " Next ` to make your project compatible" +#~ " with the lastest version of Flower." #~ msgstr "" +#~ msgid "Upgrade to Flower Next" +#~ msgstr "升级至 Flower 1.0" + #~ msgid "" -#~ "Change the application code. For " -#~ "example, change the ``seed`` in " -#~ "``quickstart_docker/task.py`` to ``43`` and " -#~ "save it:" +#~ "This guide shows how to reuse " +#~ "pre-``1.8`` Flower code with minimum " +#~ "code changes by using the *compatibility" +#~ " layer* in Flower Next. In another" +#~ " guide, we will show how to run" +#~ " Flower Next end-to-end with " +#~ "pure Flower Next APIs." 
#~ msgstr "" +#~ "本指南展示了如何通过使用 Flower Next " +#~ "中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " +#~ "代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行" +#~ " Flower Next。" -#~ msgid ":code:`fit`" -#~ msgstr ":code:`fit`" +#~ msgid "Using Poetry" +#~ msgstr "使用 pip" #~ msgid "" -#~ "Note that since version :code:`1.11.0`, " -#~ ":code:`flower-server-app` no longer " -#~ "supports passing a reference to a " -#~ "`ServerApp` attribute. Instead, you need " -#~ "to pass the path to Flower app " -#~ "via the argument :code:`--app`. This is" -#~ " the path to a directory containing" -#~ " a `pyproject.toml`. You can create a" -#~ " valid Flower app by executing " -#~ ":code:`flwr new` and following the " -#~ "prompt." +#~ "Update the ``flwr`` dependency in " +#~ "``pyproject.toml`` and then reinstall (don't" +#~ " forget to delete ``poetry.lock`` via " +#~ "``rm poetry.lock`` before running ``poetry " +#~ "install``)." #~ msgstr "" +#~ "Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` " +#~ "依赖包,然后重新安装(运行 ``poetry install`` 前,别忘了通过 ``rm" +#~ " poetry.lock` 删除 ``poetry.lock`)。" #~ msgid "" -#~ "All required parameters defined above " -#~ "are passed to :code:`XgbClient`'s constructor." +#~ "Ensure you set the following version " +#~ "constraint in your ``pyproject.toml``:" +#~ msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" + +#~ msgid "" +#~ "This function is deprecated since " +#~ "1.13.0. Use :code: `flwr run` to " +#~ "start a Flower simulation." 
#~ msgstr "" -#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgid "|c9344c3dfee24383908fabaac40a8504|" #~ msgstr "" -#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgid "|c10cd8f2177641bd8091c7b76d318ff9|" #~ msgstr "" -#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgid "|3c59c315e67945ea8b839381c5deb6c2|" #~ msgstr "" -#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgid "|eadf87e1e20549789512f7aa9199fcff|" #~ msgstr "" -#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgid "|66ce8f21aeb443fca1fc88f727458417|" #~ msgstr "" -#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgid "|f5768015a1014396b4761bb6cb3677f5|" #~ msgstr "" -#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgid "|a746aa3f56064617a4e00f4c6a0cb140|" #~ msgstr "" -#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgid "|cf8f676dd3534a44995c1b40910fd030|" #~ msgstr "" -#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgid "|d1c0e3a4c9dc4bfd88ee6f1fe626edaf|" #~ msgstr "" -#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgid "|1d8d6298a4014ec3a717135bcc7a94f9|" #~ msgstr "" -#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgid "|e3ea79200ff44d459358b9f4713e582b|" #~ msgstr "" -#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgid "|3e1061718a4a49d485764d30a4bfecdd|" #~ msgstr "" -#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgid "|7750e597d1ea4e319f7e0a40539bf214|" #~ msgstr "" -#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgid "|dd4434075f374e99ac07f509a883778f|" #~ msgstr "" -#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgid "Other changes" +#~ msgstr "不兼容的更改" + +#~ msgid "|cf5fe148406b44b9a8b842fb01b5a7ea|" #~ msgstr "" -#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgid "|ba25c91426d64cc1ae2d3febc5715b35|" #~ msgstr "" -#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgid "|fca67f83aaab4389aa9ebb4d9c5cd75e|" #~ msgstr "" -#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgid "|6f2e8f95c95443379b0df00ca9824654|" #~ msgstr "" -#~ msgid 
"|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgid "|c0ab3a1a733d4dbc9e1677aa608e8038|" #~ msgstr "" -#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgid "|8f0491bde07341ab9f2e23d50593c0be|" #~ msgstr "" -#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgid "|762fc099899943688361562252c5e600|" #~ msgstr "" -#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgid "|f62d365fd0ae405b975d3ca01e7183fd|" #~ msgstr "" -#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgid "|2c78fc1816b143289f4d909388f92a80|" #~ msgstr "" -#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgid "|4230725aeebe497d8ad84a3efc2a912b|" #~ msgstr "" -#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgid "|64b66a88417240eabe52f5cc55d89d0b|" #~ msgstr "" -#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgid "|726c8eca58bc4f859b06aa24a587b253|" #~ msgstr "" -#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgid "|f9d869e4b33c4093b29cf24ed8dff80a|" #~ msgstr "" -#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgid "|4ab50bc01a9f426a91a2c0cbc3ab7a84|" #~ msgstr "" -#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgid "Request for Flower Baselines" +#~ msgstr "Flower Baselines的申请" + +#~ msgid "Request for examples" +#~ msgstr "示例请求" + +#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" +#~ msgstr "微调 Llama 2,使用 Hugging Face Transformers 和 PyTorch" + +#~ msgid "Android ONNX on-device training" +#~ msgstr "安卓 ONNX 设备上训练" + +#~ msgid "|f150b8d6e0074250822c9f6f7a8de3e0|" #~ msgstr "" -#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgid "|72772d10debc4abd8373c0bc82985422|" #~ msgstr "" -#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgid "|5815398552ad41d290a3a2631fe8f6ca|" #~ msgstr "" -#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgid "|e6ac20744bf149378be20ac3dc309356|" #~ msgstr "" -#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgid "|a4011ef443c14725b15a8cf33b0e3443|" #~ msgstr "" -#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgid 
"|a22faa3617404c06803731525e1c609f|" #~ msgstr "" -#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgid "|84a5c9b5041c43c3beab9786197c3e4e|" #~ msgstr "" -#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgid "|b5c4be0b52d4493ba8c4af14d7c2db97|" #~ msgstr "" -#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgid "|c1c784183d18481186ff65dc261d1335|" #~ msgstr "" -#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgid "|669fcd1f44ab42f5bbd196c3cf1ecbc2|" #~ msgstr "" -#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgid "|edfb08758c9441afb6736045a59e154c|" #~ msgstr "" -#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgid "|82338b8bbad24d5ea9df3801aab37852|" #~ msgstr "" -#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgid "|518d994dd2c844898b441da03b858326|" #~ msgstr "" -#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgid "|7bfcfcb57ae5403f8e18486f45ca48b4|" #~ msgstr "" diff --git a/doc/source/_static/flower-architecture-deployment-engine.svg b/doc/source/_static/flower-architecture-deployment-engine.svg deleted file mode 100644 index 2e8dbdfd2626..000000000000 --- a/doc/source/_static/flower-architecture-deployment-engine.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - -
 User
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperExec



SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
Deployment Engine Executor
flwr run
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py index 6111a972218f..c8d2a38acbc4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -19,7 +19,6 @@ import sys from git import Repo -from sphinx.application import ConfigError # Configuration file for the Sphinx documentation builder. # @@ -90,10 +89,10 @@ author = "The Flower Authors" # The full version of the next release, including alpha/beta/rc tags -release = "1.13.0" +release = "1.14.0" # The current released version rst_prolog = """ -.. |stable_flwr_version| replace:: 1.12.0 +.. |stable_flwr_version| replace:: 1.13.1 .. |stable_flwr_superlink_docker_digest| replace:: 4b317d5b6030710b476f4dbfab2c3a33021ad40a0fcfa54d7edd45e0c51d889c .. |ubuntu_version| replace:: 24.04 .. |setuptools_version| replace:: 70.3.0 @@ -231,6 +230,8 @@ def find_test_modules(package_path): "logging": "how-to-configure-logging.html", "ssl-enabled-connections": "how-to-enable-ssl-connections.html", "upgrade-to-flower-1.0": "how-to-upgrade-to-flower-1.0.html", + "how-to-upgrade-to-flower-next": "how-to-upgrade-to-flower-1.13.html", + "how-to-enable-ssl-connections.html": "how-to-enable-tls-connections.html", # Restructuring: explanations "evaluation": "explanation-federated-evaluation.html", "differential-privacy-wrappers": "explanation-differential-privacy.html", @@ -267,7 +268,14 @@ def find_test_modules(package_path): "contributor-how-to-create-new-messages": "index.html", "example-jax-from-centralized-to-federated": "tutorial-quickstart-jax.html", "architecture": "explanation-flower-architecture.html", - "contributor-explanation-architecture.html": "explanation-flower-architecture.html", + "contributor-explanation-architecture": "explanation-flower-architecture.html", + "example-pytorch-from-centralized-to-federated": "tutorial-quickstart-pytorch.html", + "example-fedbn-pytorch-from-centralized-to-federated": "how-to-implement-fedbn.html", + "how-to-configure-logging": "index.html", + 
"how-to-monitor-simulation": "how-to-run-simulations.html", + "fed/index": "index.html", + "fed/0000-20200102-fed-template": "index.html", + "fed/0001-20220311-flower-enhancement-doc": "index.html", } # -- Options for HTML output ------------------------------------------------- diff --git a/doc/source/contributor-how-to-release-flower.rst b/doc/source/contributor-how-to-release-flower.rst index fafc02cab64c..44982ab765ab 100644 --- a/doc/source/contributor-how-to-release-flower.rst +++ b/doc/source/contributor-how-to-release-flower.rst @@ -10,9 +10,9 @@ During the release The version number of a release is stated in ``pyproject.toml``. To release a new version of Flower, the following things need to happen (in that order): -1. Run ``python3 src/py/flwr_tool/update_changelog.py `` in order to add - every new change to the changelog (feel free to make manual changes to the changelog - afterwards until it looks good). +1. Run ``python3 ./dev/update_changelog.py `` in order to add every new + change to the changelog (feel free to make manual changes to the changelog afterwards + until it looks good). 2. Once the changelog has been updated with all the changes, run ``./dev/prepare-release-changelog.sh v``, where ```` is the version stated in ``pyproject.toml`` (notice the ``v`` added before it). This will diff --git a/doc/source/contributor-ref-good-first-contributions.rst b/doc/source/contributor-ref-good-first-contributions.rst index a715e006f905..0d07f1f1d7ac 100644 --- a/doc/source/contributor-ref-good-first-contributions.rst +++ b/doc/source/contributor-ref-good-first-contributions.rst @@ -8,35 +8,32 @@ your chances of getting your PR accepted into the Flower codebase. Where to start -------------- -Until the Flower core library matures it will be easier to get PR's accepted if they -only touch non-core areas of the codebase. Good candidates to get started are: +In general, it is easier to get PR's accepted if they only touch non-core areas of the +codebase. 
Good candidates to get started are: - Documentation: What's missing? What could be expressed more clearly? +- Open issues: Issues with the tag `good first issue + `_. - Baselines: See below. - Examples: See below. -Request for Flower Baselines ----------------------------- +Flower Baselines +---------------- -If you are not familiar with Flower Baselines, you should probably check-out our -`contributing guide for baselines -`_. +If you are not familiar with Flower Baselines, please check our `contributing guide for +baselines `_. -You should then check out the open `issues +Then take a look at the open `issues `_ -for baseline requests. If you find a baseline that you'd like to work on and that has no -assignees, feel free to assign it to yourself and start working on it! +for baseline requests. If you find a baseline that you'd like to work on, and it has no +assignees, feel free to assign it to yourself and get started! -Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new -issue with the baseline request template! +If you don't find the baseline you'd like to work on, be sure to open a new issue with +the baseline request template! -Request for examples --------------------- - -We wish we had more time to write usage examples because we believe they help users to -get started with building what they want to build. Here are a few ideas where we'd be -happy to accept a PR: +Usage examples +-------------- -- Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch -- XGBoost -- Android ONNX on-device training +We wish we had more time to write usage examples because they help users to get started +with building what they want. If you notice any missing examples that could help others, +feel free to contribute! 
diff --git a/doc/source/contributor-ref-secure-aggregation-protocols.rst b/doc/source/contributor-ref-secure-aggregation-protocols.rst index 347cb2724424..0149fb8d73b5 100644 --- a/doc/source/contributor-ref-secure-aggregation-protocols.rst +++ b/doc/source/contributor-ref-secure-aggregation-protocols.rst @@ -1,434 +1,78 @@ Secure Aggregation Protocols ============================ -Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol has not been -implemented yet, so its diagram and abstraction may not be accurate in practice. The -SecAgg protocol can be considered as a special case of the SecAgg+ protocol. - -The ``SecAgg+`` abstraction ---------------------------- - -In this implementation, each client will be assigned with a unique index (int) for -secure aggregation, and thus many python dictionaries used have keys of int type rather -than ClientProxy type. - -.. code-block:: python - - class SecAggPlusProtocol(ABC): - """Abstract base class for the SecAgg+ protocol implementations.""" - - @abstractmethod - def generate_graph(self, clients: List[ClientProxy], k: int) -> ClientGraph: - """Build a k-degree undirected graph of clients. - Each client will only generate pair-wise masks with its k neighbours. - k is equal to the number of clients in SecAgg, i.e., a complete graph. - This function may need extra inputs to decide on the generation of the graph.""" - - @abstractmethod - def setup_config( - self, clients: List[ClientProxy], config_dict: Dict[str, Scalar] - ) -> SetupConfigResultsAndFailures: - """Configure the next round of secure aggregation. (SetupConfigRes is an empty class.)""" - - @abstractmethod - def ask_keys( - self, clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] - ) -> AskKeysResultsAndFailures: - """Ask public keys. 
(AskKeysIns is an empty class, and hence ask_keys_ins_list can be omitted.)""" - - @abstractmethod - def share_keys( - self, - clients: List[ClientProxy], - public_keys_dict: Dict[int, AskKeysRes], - graph: ClientGraph, - ) -> ShareKeysResultsAndFailures: - """Send public keys.""" - - @abstractmethod - def ask_vectors( - clients: List[ClientProxy], - forward_packet_list_dict: Dict[int, List[ShareKeysPacket]], - client_instructions: Dict[int, FitIns] = None, - ) -> AskVectorsResultsAndFailures: - """Ask vectors of local model parameters. - (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.) - """ - - @abstractmethod - def unmask_vectors( - clients: List[ClientProxy], - dropout_clients: List[ClientProxy], - graph: ClientGraph, - ) -> UnmaskVectorsResultsAndFailures: - """Unmask and compute the aggregated model. UnmaskVectorRes contains shares of keys needed to generate masks.""" - -The Flower server will execute and process received results in the following order: +.. note:: + + While this term might be used in other places, here it refers to a series of + protocols, including ``SecAgg``, ``SecAgg+``, ``LightSecAgg``, ``FastSecAgg``, etc. + This concept was first proposed by Bonawitz et al. in `Practical Secure Aggregation + for Federated Learning on User-Held Data `_. + +Secure Aggregation protocols are used to securely aggregate model updates from multiple +clients while keeping the updates private. This is done by encrypting the model updates +before sending them to the server. The server can decrypt only the aggregated model +update without being able to inspect individual updates. + +Flower now provides the ``SecAgg`` and ``SecAgg+`` protocols. While we plan to implement +more protocols in the future, one may also implement their own custom secure aggregation +protocol via low-level APIs. 
+ +The ``SecAgg+`` protocol in Flower +---------------------------------- + +The ``SecAgg+`` protocol is implemented using the ``SecAggPlusWorkflow`` in the +``ServerApp`` and the ``secaggplus_mod`` in the ``ClientApp``. The ``SecAgg`` protocol +is a special case of the ``SecAgg+`` protocol, and one may use ``SecAggWorkflow`` and +``secagg_mod`` for that. + +You may find a detailed example in the `Secure Aggregation Example +`_. The documentation +for the ``SecAgg+`` protocol configuration is available at `SecAggPlusWorkflow +`_. + +The logic of the ``SecAgg+`` protocol is illustrated in the following sequence diagram: +the dashed lines represent communication over the network, and the solid lines represent +communication within the same process. The ``ServerApp`` is connected to ``SuperLink``, +and the ``ClientApp`` is connected to the ``SuperNode``; thus, the communication between +the ``ServerApp`` and the ``ClientApp`` is done via the ``SuperLink`` and the +``SuperNode``. .. mermaid:: sequenceDiagram - participant S as Flower Server - participant P as SecAgg+ Protocol - participant C1 as Flower Client - participant C2 as Flower Client - participant C3 as Flower Client - - S->>P: generate_graph - activate P - P-->>S: client_graph - deactivate P - - Note left of P: Stage 0:
Setup Config - rect rgb(249, 219, 130) - S->>P: setup_config
clients, config_dict - activate P - P->>C1: SetupConfigIns - deactivate P - P->>C2: SetupConfigIns - P->>C3: SetupConfigIns - C1->>P: SetupConfigRes (empty) - C2->>P: SetupConfigRes (empty) - C3->>P: SetupConfigRes (empty) - activate P - P-->>S: None - deactivate P - end - - Note left of P: Stage 1:
Ask Keys - rect rgb(249, 219, 130) - S->>P: ask_keys
clients - activate P - P->>C1: AskKeysIns (empty) - deactivate P - P->>C2: AskKeysIns (empty) - P->>C3: AskKeysIns (empty) - C1->>P: AskKeysRes - C2->>P: AskKeysRes - C3->>P: AskKeysRes - activate P - P-->>S: public keys - deactivate P - end - - Note left of P: Stage 2:
Share Keys - rect rgb(249, 219, 130) - S->>P: share_keys
clients, public_keys_dict,
client_graph - activate P - P->>C1: ShareKeysIns - deactivate P - P->>C2: ShareKeysIns - P->>C3: ShareKeysIns - C1->>P: ShareKeysRes - C2->>P: ShareKeysRes - C3->>P: ShareKeysRes - activate P - P-->>S: encryted key shares - deactivate P - end - - Note left of P: Stage 3:
Ask Vectors - rect rgb(249, 219, 130) - S->>P: ask_vectors
clients,
forward_packet_list_dict - activate P - P->>C1: AskVectorsIns - deactivate P - P->>C2: AskVectorsIns - P->>C3: AskVectorsIns - C1->>P: AskVectorsRes - C2->>P: AskVectorsRes - activate P - P-->>S: masked vectors - deactivate P - end - - Note left of P: Stage 4:
Unmask Vectors - rect rgb(249, 219, 130) - S->>P: unmask_vectors
clients, dropped_clients,
client_graph - activate P - P->>C1: UnmaskVectorsIns - deactivate P - P->>C2: UnmaskVectorsIns - C1->>P: UnmaskVectorsRes - C2->>P: UnmaskVectorsRes - activate P - P-->>S: key shares - deactivate P - end - -The ``LightSecAgg`` abstraction -------------------------------- + participant ServerApp as ServerApp (in SuperLink) + participant SecAggPlusWorkflow + participant Mod as secaggplus_mod + participant ClientApp as ClientApp (in SuperNode) -In this implementation, each client will be assigned with a unique index (int) for -secure aggregation, and thus many python dictionaries used have keys of int type rather -than ClientProxy type. + ServerApp->>SecAggPlusWorkflow: Invoke -.. code-block:: python - - class LightSecAggProtocol(ABC): - """Abstract base class for the LightSecAgg protocol implementations.""" - - @abstractmethod - def setup_config( - self, clients: List[ClientProxy], config_dict: Dict[str, Scalar] - ) -> LightSecAggSetupConfigResultsAndFailures: - """Configure the next round of secure aggregation.""" - - @abstractmethod - def ask_encrypted_encoded_masks( - self, - clients: List[ClientProxy], - public_keys_dict: Dict[int, LightSecAggSetupConfigRes], - ) -> AskEncryptedEncodedMasksResultsAndFailures: - """Ask encrypted encoded masks. The protocol adopts Diffie-Hellman keys to build pair-wise secured channels to transfer encoded mask.""" - - @abstractmethod - def ask_masked_models( - self, - clients: List[ClientProxy], - forward_packet_list_dict: Dict[int, List[EncryptedEncodedMasksPacket]], - client_instructions: Dict[int, FitIns] = None, - ) -> AskMaskedModelsResultsAndFailures: - """Ask the masked local models. - (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.) 
- """ - - @abstractmethod - def ask_aggregated_encoded_masks( - clients: List[ClientProxy], - ) -> AskAggregatedEncodedMasksResultsAndFailures: - """Ask aggregated encoded masks""" - -The Flower server will execute and process received results in the following order: - -.. mermaid:: - - sequenceDiagram - participant S as Flower Server - participant P as LightSecAgg Protocol - participant C1 as Flower Client - participant C2 as Flower Client - participant C3 as Flower Client - - Note left of P: Stage 0:
Setup Config - rect rgb(249, 219, 130) - S->>P: setup_config
clients, config_dict - activate P - P->>C1: LightSecAggSetupConfigIns - deactivate P - P->>C2: LightSecAggSetupConfigIns - P->>C3: LightSecAggSetupConfigIns - C1->>P: LightSecAggSetupConfigRes - C2->>P: LightSecAggSetupConfigRes - C3->>P: LightSecAggSetupConfigRes - activate P - P-->>S: public keys - deactivate P + rect rgb(235, 235, 235) + note over SecAggPlusWorkflow,Mod: Stage 0: Setup + SecAggPlusWorkflow-->>Mod: Send SecAgg+ configuration + Mod-->>SecAggPlusWorkflow: Send public keys end - Note left of P: Stage 1:
Ask Encrypted Encoded Masks - rect rgb(249, 219, 130) - S->>P: ask_encrypted_encoded_masks
clients, public_keys_dict - activate P - P->>C1: AskEncryptedEncodedMasksIns - deactivate P - P->>C2: AskEncryptedEncodedMasksIns - P->>C3: AskEncryptedEncodedMasksIns - C1->>P: AskEncryptedEncodedMasksRes - C2->>P: AskEncryptedEncodedMasksRes - C3->>P: AskEncryptedEncodedMasksRes - activate P - P-->>S: forward packets - deactivate P + rect rgb(220, 220, 220) + note over SecAggPlusWorkflow,Mod: Stage 1: Share Keys + SecAggPlusWorkflow-->>Mod: Broadcast public keys + Mod-->>SecAggPlusWorkflow: Send encrypted private key shares end - Note left of P: Stage 2:
Ask Masked Models - rect rgb(249, 219, 130) - S->>P: share_keys
clients, forward_packet_list_dict - activate P - P->>C1: AskMaskedModelsIns - deactivate P - P->>C2: AskMaskedModelsIns - P->>C3: AskMaskedModelsIns - C1->>P: AskMaskedModelsRes - C2->>P: AskMaskedModelsRes - activate P - P-->>S: masked local models - deactivate P + rect rgb(235, 235, 235) + note over SecAggPlusWorkflow,ClientApp: Stage 2: Collect Masked Vectors + SecAggPlusWorkflow-->>Mod: Forward the received shares + Mod->>ClientApp: Fit instructions + activate ClientApp + ClientApp->>Mod: Updated model + deactivate ClientApp + Mod-->>SecAggPlusWorkflow: Send masked model parameters end - Note left of P: Stage 3:
Ask Aggregated Encoded Masks - rect rgb(249, 219, 130) - S->>P: ask_aggregated_encoded_masks
clients - activate P - P->>C1: AskAggregatedEncodedMasksIns - deactivate P - P->>C2: AskAggregatedEncodedMasksIns - C1->>P: AskAggregatedEncodedMasksRes - C2->>P: AskAggregatedEncodedMasksRes - activate P - P-->>S: the aggregated model - deactivate P + rect rgb(220, 220, 220) + note over SecAggPlusWorkflow,Mod: Stage 3: Unmask + SecAggPlusWorkflow-->>Mod: Request private key shares + Mod-->>SecAggPlusWorkflow: Send private key shares end - -Types ------ - -.. code-block:: python - - # the SecAgg+ protocol - - ClientGraph = Dict[int, List[int]] - - SetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] - ] - - AskKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] - ] - - ShareKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] - ] - - AskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] - ] - - UnmaskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] - ] - - FitResultsAndFailures = Tuple[List[Tuple[ClientProxy, FitRes]], List[BaseException]] - - - @dataclass - class SetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] - - - @dataclass - class SetupConfigRes: - pass - - - @dataclass - class AskKeysIns: - pass - - - @dataclass - class AskKeysRes: - """Ask Keys Stage Response from client to server""" - - pk1: bytes - pk2: bytes - - - @dataclass - class ShareKeysIns: - public_keys_dict: Dict[int, AskKeysRes] - - - @dataclass - class ShareKeysPacket: - source: int - destination: int - ciphertext: bytes - - - @dataclass - class ShareKeysRes: - share_keys_res_list: List[ShareKeysPacket] - - - @dataclass - class AskVectorsIns: - ask_vectors_in_list: List[ShareKeysPacket] - fit_ins: FitIns - - - @dataclass - class AskVectorsRes: - parameters: Parameters - - - @dataclass - class UnmaskVectorsIns: - available_clients: List[int] - dropout_clients: 
List[int] - - - @dataclass - class UnmaskVectorsRes: - share_dict: Dict[int, bytes] - - - # the LightSecAgg protocol - - LightSecAggSetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] - ] - - AskEncryptedEncodedMasksResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] - ] - - AskMaskedModelsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] - ] - - AskAggregatedEncodedMasksResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] - ] - - - @dataclass - class LightSecAggSetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] - - - @dataclass - class LightSecAggSetupConfigRes: - pk: bytes - - - @dataclass - class AskEncryptedEncodedMasksIns: - public_keys_dict: Dict[int, LightSecAggSetupConfigRes] - - - @dataclass - class EncryptedEncodedMasksPacket: - source: int - destination: int - ciphertext: bytes - - - @dataclass - class AskEncryptedEncodedMasksRes: - packet_list: List[EncryptedEncodedMasksPacket] - - - @dataclass - class AskMaskedModelsIns: - packet_list: List[EncryptedEncodedMasksPacket] - fit_ins: FitIns - - - @dataclass - class AskMaskedModelsRes: - parameters: Parameters - - - @dataclass - class AskAggregatedEncodedMasksIns: - surviving_clients: List[int] - - - @dataclass - class AskAggregatedEncodedMasksRes: - aggregated_encoded_mask: Parameters + SecAggPlusWorkflow->>SecAggPlusWorkflow: Unmask aggregated model + SecAggPlusWorkflow->>ServerApp: Aggregated model diff --git a/doc/source/docker/enable-tls.rst b/doc/source/docker/enable-tls.rst index f50edb8c651d..eaa853298439 100644 --- a/doc/source/docker/enable-tls.rst +++ b/doc/source/docker/enable-tls.rst @@ -2,10 +2,7 @@ Enable TLS for Secure Connections ================================= When operating in a production environment, it is strongly recommended to enable -Transport Layer Security (TLS) for 
each Flower Component to ensure secure communication. - -To enable TLS, you will need a PEM-encoded root certificate, a PEM-encoded private key -and a PEM-encoded certificate chain. +Transport Layer Security (TLS) for each Flower component to ensure secure communication. .. note:: @@ -14,139 +11,246 @@ and a PEM-encoded certificate chain. `__ page contains a section that will guide you through the process. -Because Flower containers, by default, run with a non-root user ``app``, the mounted -files and directories must have the proper permissions for the user ID ``49999``. - -For example, to change the user ID of all files in the ``certificates/`` directory, you -can run ``sudo chown -R 49999:49999 certificates/*``. - -If you later want to delete the directory, you can change the user ID back to the -current user ID by running ``sudo chown -R $USER:$(id -gn) state``. - -SuperLink ---------- - -Assuming all files we need are in the local ``certificates`` directory, we can use the -flag ``--volume`` to mount the local directory into the ``/app/certificates/`` directory -of the container: - -.. code-block:: bash - :substitutions: - - $ docker run --rm \ - --volume ./certificates/:/app/certificates/:ro \ - flwr/superlink:|stable_flwr_version| \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key - -.. dropdown:: Understanding the command - - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in - | the current working directory of the host machine as a read-only volume at the - | ``/app/certificates`` directory inside the container. - | - | This allows the container to access the TLS certificates that are stored in the certificates - | directory. 
- * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file - | inside the container. - | - | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the - | SuperLink. - * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's - | TLS certificate file inside the container. - | - | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the - | data that is transmitted over the network. - * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's - | TLS private key file inside the container. - | - | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over - | the network. - -SuperNode ---------- - -Assuming that the ``ca.crt`` certificate already exists locally, we can use the flag -``--volume`` to mount the local certificate into the container's ``/app/`` directory. +.. note:: + + Because Flower containers, by default, run with a non-root user ``app``, the mounted + files and directories must have the proper permissions for the user ID ``49999``. + + For example, to change the user ID of all files in the ``certificates/`` directory, + you can run ``sudo chown -R 49999:49999 certificates/*``. + + If you later want to delete the directory, you can change the user ID back to the + current user ID by running ``sudo chown -R $USER:$(id -gn) certificates``. + +.. tab-set:: + + .. tab-item:: Isolation Mode ``subprocess`` + + By default, the ServerApp is executed as a subprocess within the SuperLink Docker + container, and the ClientApp is run as a subprocess within the SuperNode Docker + container. 
You can learn more about the different process modes here: + :doc:`run-as-subprocess`. + + To enable TLS between the SuperLink and SuperNode, as well as between the SuperLink and the ``flwr`` + CLI, you will need a PEM-encoded root certificate, private key, and certificate chain. + + **SuperLink** + + Assuming all files we need are in the local ``superlink-certificates`` directory, + we can use the flag ``--volume`` to mount the local directories into the SuperLink container: + + .. code-block:: bash + + $ docker run --rm \ + --volume ./superlink-certificates/:/app/certificates/:ro \ + \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key \ + + + .. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the ``superlink-certificates`` + | directory in the current working directory of the host machine as a read-only volume + | at the ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. + * ````: The name of your SuperLink image to be run. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperLink. + * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the + | data that is transmitted over the network. 
+ * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. + + **SuperNode** + + .. note:: + + If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't + exist on the SuperNode, you can copy it over after the generation step. + + .. code-block:: bash + + $ docker run --rm \ + --volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro \ + \ + --root-certificates ca.crt \ + + + .. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` + | file from the ``superlink-certificates`` directory of the host machine as a read-only + | volume at the ``/app/ca.crt`` directory inside the container. + * ````: The name of your SuperNode image to be run. + * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file + | inside the container. + | + | The ``ca.crt`` file is used to verify the identity of the SuperLink. + + .. tab-item:: Isolation Mode ``process`` + + In isolation mode ``process``, the ServerApp and ClientApp run in their own processes. + Unlike in isolation mode ``subprocess``, the SuperLink or SuperNode does not attempt to + create the respective processes; instead, they must be created externally. + + It is possible to run only the SuperLink in isolation mode ``subprocess`` and the + SuperNode in isolation mode ``process``, or vice versa, or even both with isolation mode + ``process``. 
+ + **SuperLink and ServerApp** + + To enable TLS between the SuperLink and SuperNode, as well as between the SuperLink and the ``flwr`` + CLI, you will need a PEM-encoded root certificate, private key, and certificate chain. + + Assuming all files we need are in the local ``superlink-certificates`` directory, we can + use the flag ``--volume`` to mount the local directory into the SuperLink container: + + + .. code-block:: bash + :substitutions: + + $ docker run --rm \ + --volume ./superlink-certificates/:/app/certificates/:ro \ + flwr/superlink:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key \ + --isolation process \ + + + .. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./superlink-certificates/:/app/certificates/:ro``: Mount the + | ``superlink-certificates`` directory in the current working directory of the host + | machine as a read-only volume at the ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. + * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperLink. + * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's + | TLS certificate file inside the container. 
+ | + | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. + * | ``--isolation process``: Tells the SuperLink that the ServerApp is created by a separate, + | independent process. The SuperLink does not attempt to create it. + + Start the ServerApp container: + + .. code-block:: bash + + $ docker run --rm \ + \ + --insecure \ + + + .. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * ````: The name of your ServerApp image to be run. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. Secure connections will be added in future releases. + + **SuperNode and ClientApp** + + .. note:: + + If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't + exist on the SuperNode, you can copy it over after the generation step. + + Start the SuperNode container: + + .. code-block:: bash + :substitutions: + + $ docker run --rm \ + --volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro \ + flwr/supernode:|stable_flwr_version| \ + --root-certificates ca.crt \ + --isolation process \ + + + .. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./superlink-certificates/ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the + | ``superlink-certificates`` directory of the host machine as a read-only volume at the ``/app/ca.crt`` + | directory inside the container. 
+ * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file + | inside the container. + | + | The ``ca.crt`` file is used to verify the identity of the SuperLink. + * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate + | independent process. The SuperNode does not attempt to create it. + + Start the ClientApp container: + + .. code-block:: bash + + $ docker run --rm \ + \ + --insecure \ + + + .. dropdown:: Understand the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * ````: The name of your ClientApp image to be run. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. Secure connections will be added in future releases. + +Append the following lines to the end of the ``pyproject.toml`` file and save it: + +.. code-block:: toml + :caption: pyproject.toml + + [tool.flwr.federations.local-deployment-tls] + address = "127.0.0.1:9093" + root-certificates = "../superlink-certificates/ca.crt" + +The path of the ``root-certificates`` should be relative to the location of the +``pyproject.toml`` file. .. note:: - If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't - exist on the SuperNode, you can copy it over after the generation step. - -.. code-block:: bash - :substitutions: - - $ docker run --rm \ - --volume ./ca.crt:/app/ca.crt/:ro \ - flwr/supernode:|stable_flwr_version| \ - --root-certificates ca.crt - -.. dropdown:: Understanding the command - - * ``docker run``: This tells Docker to run a container from an image. 
- * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the - | current working directory of the host machine as a read-only volume at the ``/app/ca.crt`` - | directory inside the container. - * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file - | inside the container. - | - | The ``ca.crt`` file is used to verify the identity of the SuperLink. - -SuperExec ---------- - -Assuming all files we need are in the local ``certificates`` directory where the -SuperExec will be executed from, we can use the flag ``--volume`` to mount the local -directory into the ``/app/certificates/`` directory of the container: - -.. code-block:: bash - :substitutions: - - $ docker run --rm \ - --volume ./certificates/:/app/certificates/:ro \ - flwr/superexec:|stable_flwr_version| \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key \ - --executor-config \ - root-certificates=\"certificates/superlink_ca.crt\" - -.. dropdown:: Understanding the command - - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in - | the current working directory of the host machine as a read-only volume at the - | ``/app/certificates`` directory inside the container. - | - | This allows the container to access the TLS certificates that are stored in the certificates - | directory. 
- * | :substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file - | inside the container. - | - | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the - | SuperExec. - * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperExec's - | TLS certificate file inside the container. - | - | The ``certificates/server.pem`` file is used to identify the SuperExec and to encrypt the - | data that is transmitted over the network. - * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperExec's - | TLS private key file inside the container. - | - | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over - | the network. - * | ``--executor-config root-certificates=\"certificates/superlink_ca.crt\"``: Specify the - | location of the CA certificate file inside the container that the SuperExec executor - | should use to verify the SuperLink's identity. + You can customize the string that follows ``tool.flwr.federations.`` to fit your + needs. However, please note that the string cannot contain a dot (``.``). + + In this example, ``local-deployment-tls`` has been used. Just remember to replace + ``local-deployment-tls`` with your chosen name in both the + ``tool.flwr.federations.`` string and the corresponding ``flwr run .`` command. diff --git a/doc/source/docker/persist-superlink-state.rst b/doc/source/docker/persist-superlink-state.rst index 214e408c44c3..e61fcd3a63c2 100644 --- a/doc/source/docker/persist-superlink-state.rst +++ b/doc/source/docker/persist-superlink-state.rst @@ -31,7 +31,7 @@ specify the name of the database file. 
$ docker run --rm \ --volume ./state/:/app/state flwr/superlink:|stable_flwr_version| \ --database state.db \ - ... + As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` directory on your host system. If the file already exists, the SuperLink tries to diff --git a/doc/source/docker/pin-version.rst b/doc/source/docker/pin-version.rst index 4a69860aa428..36ed4ee5d379 100644 --- a/doc/source/docker/pin-version.rst +++ b/doc/source/docker/pin-version.rst @@ -25,7 +25,7 @@ This will output .. code-block:: bash :substitutions: - flwr/superlink@sha256:|stable__flwr_superlink_docker_digest| + flwr/superlink@sha256:|stable_flwr_superlink_docker_digest| Next, we can pin the digest when running a new SuperLink container: @@ -33,5 +33,5 @@ Next, we can pin the digest when running a new SuperLink container: :substitutions: $ docker run \ - --rm flwr/superlink@sha256:|latest_version_docker_sha| \ - [OPTIONS] + --rm flwr/superlink@sha256:|stable_flwr_superlink_docker_digest| \ + diff --git a/doc/source/docker/run-as-root-user.rst b/doc/source/docker/run-as-root-user.rst index 5f8e5eae43af..24bd39e2f53d 100644 --- a/doc/source/docker/run-as-root-user.rst +++ b/doc/source/docker/run-as-root-user.rst @@ -16,7 +16,7 @@ Run the Docker image with the ``-u`` flag and specify ``root`` as the username: .. code-block:: bash :substitutions: - $ docker run --rm -u root flwr/superlink:|stable_flwr_version| + $ docker run --rm -u root flwr/superlink:|stable_flwr_version| This command will run the Docker container with root user privileges. 
diff --git a/doc/source/docker/run-as-subprocess.rst b/doc/source/docker/run-as-subprocess.rst index d97319ff52af..c9b8404820b7 100644 --- a/doc/source/docker/run-as-subprocess.rst +++ b/doc/source/docker/run-as-subprocess.rst @@ -1,53 +1,112 @@ -Run ClientApp as a Subprocess -============================= +Run ServerApp or ClientApp as a Subprocess +========================================== -In this mode, the ClientApp is executed as a subprocess within the SuperNode Docker -container, rather than running in a separate container. This approach reduces the number -of running containers, which can be beneficial for environments with limited resources. -However, it also means that the ClientApp is no longer isolated from the SuperNode, -which may introduce additional security concerns. +The SuperLink and SuperNode components support two distinct isolation modes, allowing +for flexible deployment and control: -Prerequisites -------------- +1. Subprocess Mode: In this configuration (default), the SuperLink and SuperNode take + responsibility for launching the ServerApp and ClientApp processes internally. This + differs from the ``process`` isolation-mode which uses separate containers, as + demonstrated in the :doc:`tutorial-quickstart-docker` guide. -1. Before running the ClientApp as a subprocess, ensure that the FAB dependencies have - been installed in the SuperNode images. This can be done by extending the SuperNode - image: + Using the ``subprocess`` approach reduces the number of running containers, which can + be beneficial for environments with limited resources. However, it also means that + the applications are not isolated from their parent containers, which may introduce + additional security concerns. - .. code-block:: dockerfile - :caption: Dockerfile.supernode - :linenos: - :substitutions: +2. Process Mode: In this mode, the ServerApp and ClientApps run in completely separate + processes. 
Unlike the alternative Subprocess mode, the SuperLink or SuperNode does + not attempt to create or manage these processes. Instead, they must be started + externally. - FROM flwr/supernode:|stable_flwr_version| +Both modes can be mixed for added flexibility. For instance, you can run the SuperLink +in ``subprocess`` mode while keeping the SuperNode in ``process`` mode, or vice versa. - WORKDIR /app - COPY pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . +To run the SuperLink and SuperNode in isolation mode ``process``, refer to the +:doc:`tutorial-quickstart-docker` guide. To run them in ``subprocess`` mode, follow the +instructions below. - ENTRYPOINT ["flower-supernode"] +.. tab-set:: -2. Next, build the SuperNode Docker image by running the following command in the - directory where Dockerfile is located: + .. tab-item:: ServerApp - .. code-block:: shell + **Prerequisites** - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + 1. Before running the ServerApp as a subprocess, ensure that the FAB dependencies have + been installed in the SuperLink images. This can be done by extending the SuperLink image: -Run the ClientApp as a Subprocess ---------------------------------- + .. code-block:: dockerfile + :caption: superlink.Dockerfile + :linenos: + :substitutions: -Start the SuperNode with the flag ``--isolation subprocess``, which tells the SuperNode -to execute the ClientApp as a subprocess: + FROM flwr/superlink:|stable_flwr_version| -.. code-block:: shell + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . - $ docker run --rm \ - --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config "partition-id=1 num-partitions=2" \ - --supernode-address localhost:9094 \ - --isolation subprocess + ENTRYPOINT ["flower-superlink"] + + 2. 
Next, build the SuperLink Docker image by running the following command in the + directory where Dockerfile is located: + + .. code-block:: shell + + $ docker build -f superlink.Dockerfile -t flwr_superlink:0.0.1 . + + **Run the ServerApp as a Subprocess** + + Start the SuperLink and run the ServerApp as a subprocess (note that + the subprocess mode is the default, so you do not have to explicitly set the ``--isolation`` flag): + + .. code-block:: shell + + $ docker run --rm \ + -p 9091:9091 -p 9092:9092 -p 9093:9093 \ + --detach \ + flwr_superlink:0.0.1 \ + --insecure + + .. tab-item:: ClientApp + + **Prerequisites** + + 1. Before running the ClientApp as a subprocess, ensure that the FAB dependencies have + been installed in the SuperNode images. This can be done by extending the SuperNode + image: + + .. code-block:: dockerfile + :caption: supernode.Dockerfile + :linenos: + :substitutions: + + FROM flwr/supernode:|stable_flwr_version| + + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flower-supernode"] + + 2. Next, build the SuperNode Docker image by running the following command in the + directory where Dockerfile is located: + + .. code-block:: shell + + $ docker build -f supernode.Dockerfile -t flwr_supernode:0.0.1 . + + **Run the ClientApp as a Subprocess** + + Start the SuperNode and run the ClientApp as a subprocess (note that + the subprocess mode is the default, so you do not have to explicitly set the ``--isolation`` flag): + + .. 
code-block:: shell + + $ docker run --rm \ + --detach \ + flwr_supernode:0.0.1 \ + --insecure \ + --superlink :9092 diff --git a/doc/source/docker/run-quickstart-examples-docker-compose.rst b/doc/source/docker/run-quickstart-examples-docker-compose.rst index b31f0035e143..385d5599461d 100644 --- a/doc/source/docker/run-quickstart-examples-docker-compose.rst +++ b/doc/source/docker/run-quickstart-examples-docker-compose.rst @@ -19,7 +19,7 @@ Before you start, make sure that: - The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. - The Docker daemon is running. -- Docker Compose is `installed `_. +- Docker Compose V2 is `installed `_. Run the Quickstart Example -------------------------- @@ -37,14 +37,18 @@ Run the Quickstart Example into the example directory: .. code-block:: bash + :substitutions: - $ curl https://raw.githubusercontent.com/adap/flower/refs/heads/main/src/docker/complete/compose.yml \ + $ curl https://raw.githubusercontent.com/adap/flower/refs/tags/v|stable_flwr_version|/src/docker/complete/compose.yml \ -o compose.yml -3. Build and start the services using the following command: +3. Export the version of Flower that your environment uses. Then, build and start the + services using the following command: .. code-block:: bash + :substitutions: + $ export FLWR_VERSION="|stable_flwr_version|" # update with your version $ docker compose up --build -d 4. Append the following lines to the end of the ``pyproject.toml`` file and save it: @@ -65,20 +69,14 @@ Run the Quickstart Example ``local-deployment`` with your chosen name in both the ``tool.flwr.federations.`` string and the corresponding ``flwr run .`` command. -5. Run the example: +5. Run the example and follow the logs of the ``ServerApp`` : .. code-block:: bash - $ flwr run . local-deployment - -6. Follow the logs of the SuperExec service: - - .. code-block:: bash - - $ docker compose logs superexec -f + $ flwr run . local-deployment --stream That is all it takes! 
You can monitor the progress of the run through the logs of the -SuperExec. +``ServerApp``. Run a Different Quickstart Example ---------------------------------- @@ -105,7 +103,7 @@ Limitations - - quickstart-huggingface - None - - quickstart-jax - - The example has not yet been updated to work with the latest ``flwr`` version. + - None - - quickstart-mlcube - The example has not yet been updated to work with the latest ``flwr`` version. - - quickstart-mlx @@ -124,4 +122,4 @@ Limitations - - quickstart-tabnet - The example has not yet been updated to work with the latest ``flwr`` version. - - quickstart-tensorflow - - Only runs on AMD64. + - None diff --git a/doc/source/docker/set-environment-variables.rst b/doc/source/docker/set-environment-variables.rst index f5d860812bab..c9c6bc946509 100644 --- a/doc/source/docker/set-environment-variables.rst +++ b/doc/source/docker/set-environment-variables.rst @@ -11,4 +11,5 @@ Example :substitutions: $ docker run -e FLWR_TELEMETRY_ENABLED=0 -e FLWR_TELEMETRY_LOGGING=0 \ - --rm flwr/superlink:|stable_flwr_version| + --rm flwr/superlink:|stable_flwr_version| \ + diff --git a/doc/source/docker/tutorial-deploy-on-multiple-machines.rst b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst index 72958c926ba9..ffe0b090af9e 100644 --- a/doc/source/docker/tutorial-deploy-on-multiple-machines.rst +++ b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst @@ -6,7 +6,7 @@ Compose. You will learn how to run the Flower client and server components on two separate machines, with Flower configured to use TLS encryption and persist SuperLink state -across restarts. A server consists of a SuperLink and ``SuperExec``. For more details +across restarts. A server consists of a SuperLink and a ``ServerApp``. For more details about the Flower architecture, refer to the :doc:`../explanation-flower-architecture` explainer page. @@ -38,8 +38,9 @@ Step 1: Set Up 1. 
Clone the Flower repository and change to the ``distributed`` directory: .. code-block:: bash + :substitutions: - $ git clone --depth=1 https://github.com/adap/flower.git + $ git clone --depth=1 --branch v|stable_flwr_version| https://github.com/adap/flower.git $ cd flower/src/docker/distributed 2. Get the IP address from the remote machine and save it for later. @@ -53,54 +54,66 @@ Step 1: Set Up For production environments, you may have to use dedicated services to obtain your certificates. - First, set the environment variables ``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the - IP address from the remote machine. For example, if the IP is ``192.168.2.33``, - execute: + First, set the environment variable ``SUPERLINK_IP`` with the IP address from the + remote machine. For example, if the IP is ``192.168.2.33``, execute: .. code-block:: bash $ export SUPERLINK_IP=192.168.2.33 - $ export SUPEREXEC_IP=192.168.2.33 Next, generate the self-signed certificates: .. code-block:: bash - $ docker compose -f certs.yml -f ../complete/certs.yml up --build + $ docker compose -f certs.yml -f ../complete/certs.yml run --rm --build gen-certs Step 2: Copy the Server Compose Files ------------------------------------- Use the method that works best for you to copy the ``server`` directory, the -certificates, and your Flower project to the remote machine. +certificates, and the ``pyproject.toml`` file of your Flower project to the remote +machine. For example, you can use ``scp`` to copy the directories: .. 
code-block:: bash $ scp -r ./server \ - ./superexec-certificates \ ./superlink-certificates \ - ../../../examples/quickstart-sklearn-tabular remote:~/distributed + ../../../examples/quickstart-sklearn-tabular/pyproject.toml remote:~/distributed Step 3: Start the Flower Server Components ------------------------------------------ Log into the remote machine using ``ssh`` and run the following command to start the -SuperLink and SuperExec services: +SuperLink and ``ServerApp`` services: .. code-block:: bash + :linenos: - $ ssh - # In your remote machine - $ cd - $ export PROJECT_DIR=../quickstart-sklearn-tabular - $ docker compose -f server/compose.yml up --build -d + $ ssh + # In your remote machine + $ cd + $ export PROJECT_DIR=../ + $ docker compose -f server/compose.yml up --build -d + +.. note:: + + The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file should be + relative to the location of the server ``compose.yml`` file. .. note:: - The Path of the ``PROJECT_DIR`` should be relative to the location of the ``server`` - Docker Compose files. + When working with Docker Compose on Linux, you may need to create the ``state`` + directory first and change its ownership to ensure proper access and permissions. + After exporting the ``PROJECT_DIR`` (after line 4), run the following commands: + + .. code-block:: bash + + $ mkdir server/state + $ sudo chown -R 49999:49999 server/state + + For more information, consult the following page: :doc:`persist-superlink-state`. Go back to your terminal on your local machine. @@ -117,33 +130,33 @@ On your local machine, run the following command to start the client components: .. note:: - The Path of the ``PROJECT_DIR`` should be relative to the location of the ``client`` - Docker Compose files. + The path to the ``PROJECT_DIR`` containing the ``pyproject.toml`` file should be + relative to the location of the client ``compose.yml`` file. 
Step 5: Run Your Flower Project ------------------------------- -Specify the remote SuperExec IP addresses and the path to the root certificate in the -``[tool.flwr.federations.remote-superexec]`` table in the ``pyproject.toml`` file. Here, -we have named our remote federation ``remote-superexec``: +Specify the remote SuperLink IP addresses and the path to the root certificate in the +``[tool.flwr.federations.remote-deployment]`` table in the ``pyproject.toml`` file. +Here, we have named our remote federation ``remote-deployment``: .. code-block:: toml :caption: examples/quickstart-sklearn-tabular/pyproject.toml - [tool.flwr.federations.remote-superexec] + [tool.flwr.federations.remote-deployment] address = "192.168.2.33:9093" - root-certificates = "../../src/docker/distributed/superexec-certificates/ca.crt" + root-certificates = "../../src/docker/distributed/superlink-certificates/ca.crt" .. note:: - The Path of the ``root-certificates`` should be relative to the location of the + The path of the ``root-certificates`` should be relative to the location of the ``pyproject.toml`` file. -To run the project, execute: +Run the project and follow the ``ServerApp`` logs: .. code-block:: bash - $ flwr run ../../../examples/quickstart-sklearn-tabular remote-superexec + $ flwr run ../../../examples/quickstart-sklearn-tabular remote-deployment --stream That's it! With these steps, you've set up Flower on two separate machines and are ready to start using it. diff --git a/doc/source/docker/tutorial-quickstart-docker-compose.rst b/doc/source/docker/tutorial-quickstart-docker-compose.rst index bff3125c1b16..3c0a6463e50e 100644 --- a/doc/source/docker/tutorial-quickstart-docker-compose.rst +++ b/doc/source/docker/tutorial-quickstart-docker-compose.rst @@ -16,7 +16,7 @@ Before you start, make sure that: - The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. - The Docker daemon is running. -- Docker Compose is `installed `_. 
+- Docker Compose V2 is `installed `_. Step 1: Set Up -------------- @@ -24,8 +24,9 @@ Step 1: Set Up 1. Clone the Docker Compose ``complete`` directory: .. code-block:: bash + :substitutions: - $ git clone --depth=1 https://github.com/adap/flower.git _tmp \ + $ git clone --depth=1 --branch v|stable_flwr_version| https://github.com/adap/flower.git _tmp \ && mv _tmp/src/docker/complete . \ && rm -rf _tmp && cd complete @@ -43,7 +44,8 @@ Step 1: Set Up $ export PROJECT_DIR=quickstart-compose Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, - allowing it to install dependencies in the SuperExec and SuperNode images correctly. + allowing it to install dependencies in the ``ServerApp`` and ``ClientApp`` images + correctly. Step 2: Run Flower in Insecure Mode ----------------------------------- @@ -63,12 +65,11 @@ Open your terminal and run: .. code-block:: bash - $ docker compose -f compose.yml up --build -d + $ docker compose up --build -d .. dropdown:: Understand the command * ``docker compose``: The Docker command to run the Docker Compose tool. - * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. * ``--build``: Rebuild the images for each service if they don't already exist. * ``-d``: Detach the containers from the terminal and run them in the background. @@ -78,7 +79,7 @@ Step 3: Run the Quickstart Project Now that the Flower services have been started via Docker Compose, it is time to run the quickstart example. -To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify the SuperExec +To ensure the ``flwr`` CLI connects to the SuperLink, you need to specify the SuperLink addresses in the ``pyproject.toml`` file. 1. Add the following lines to the ``quickstart-compose/pyproject.toml``: @@ -86,21 +87,16 @@ addresses in the ``pyproject.toml`` file. .. 
code-block:: toml :caption: quickstart-compose/pyproject.toml - [tool.flwr.federations.docker-compose] + [tool.flwr.federations.local-deployment] address = "127.0.0.1:9093" insecure = true -2. Execute the command to run the quickstart example: +2. Run the quickstart example, monitor the ``ServerApp`` logs and wait for the summary + to appear: .. code-block:: bash - $ flwr run quickstart-compose docker-compose - -3. Monitor the SuperExec logs and wait for the summary to appear: - - .. code-block:: bash - - $ docker compose logs superexec -f + $ flwr run quickstart-compose local-deployment --stream Step 4: Update the Application ------------------------------ @@ -135,30 +131,25 @@ In the next step, change the application code. .. code-block:: bash - $ docker compose -f compose.yml up --build -d + $ docker compose up --build -d 3. Run the updated quickstart example: .. code-block:: bash - $ flwr run quickstart-compose docker-compose - $ docker compose logs superexec -f + $ flwr run quickstart-compose local-deployment --stream - In the SuperExec logs, you should find the ``Get weights`` line: + In the ``ServerApp`` logs, you should find the ``Get weights`` line: .. code-block:: - :emphasize-lines: 9 - - superexec-1 | INFO : Starting Flower SuperExec - superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP server. - superexec-1 | INFO : Starting Flower SuperExec gRPC server on 0.0.0.0:9093 - superexec-1 | INFO : ExecServicer.StartRun - superexec-1 | 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower/quickstart-compose/1.0.0. - superexec-1 | INFO : Created run -6767165609169293507 - superexec-1 | INFO : Started run -6767165609169293507 - superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP client connected to superlink:9091. 
- superexec-1 | Get weights - superexec-1 | INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + :emphasize-lines: 5 + + INFO : Starting logstream for run_id `10386255862566726253` + INFO : Starting Flower ServerApp + WARNING : Option `--insecure` was set. Starting insecure HTTP channel to superlink:9091. + 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower.quickstart-compose.1.0.0.35361a47. + Get weights + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout Step 5: Persisting the SuperLink State -------------------------------------- @@ -194,7 +185,7 @@ service, ensuring that it maintains its state even after a restart. .. code-block:: bash - $ flwr run quickstart-compose docker-compose + $ flwr run quickstart-compose local-deployment --stream 3. Check the content of the ``state`` directory: @@ -227,16 +218,16 @@ Step 6: Run Flower with TLS .. code-block:: bash - $ docker compose -f certs.yml up --build + $ docker compose -f certs.yml run --rm --build gen-certs 2. Add the following lines to the ``quickstart-compose/pyproject.toml``: .. code-block:: toml :caption: quickstart-compose/pyproject.toml - [tool.flwr.federations.docker-compose-tls] + [tool.flwr.federations.local-deployment-tls] address = "127.0.0.1:9093" - root-certificates = "../superexec-certificates/ca.crt" + root-certificates = "../superlink-certificates/ca.crt" 3. Restart the services with TLS enabled: @@ -248,19 +239,13 @@ Step 6: Run Flower with TLS .. code-block:: bash - $ flwr run quickstart-compose docker-compose-tls - $ docker compose logs superexec -f - -Step 7: Add another SuperNode ------------------------------ - -You can add more SuperNodes and ClientApps by duplicating their definitions in the -``compose.yml`` file. + $ flwr run quickstart-compose local-deployment-tls --stream -Just give each new SuperNode and ClientApp service a unique service name like -``supernode-3``, ``clientapp-3``, etc. 
+Step 7: Add another SuperNode and ClientApp +------------------------------------------- -In ``compose.yml``, add the following: +You can add more SuperNodes and ClientApps by uncommenting their definitions in the +``compose.yml`` file: .. code-block:: yaml :caption: compose.yml @@ -274,7 +259,7 @@ In ``compose.yml``, add the following: - --insecure - --superlink - superlink:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9096 - --isolation - process @@ -303,7 +288,8 @@ In ``compose.yml``, add the following: ENTRYPOINT ["flwr-clientapp"] command: - - --supernode + - --insecure + - --clientappio-api-address - supernode-3:9096 deploy: resources: @@ -313,12 +299,8 @@ In ``compose.yml``, add the following: depends_on: - supernode-3 -If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode -definition for each new SuperNode service in the ``with-tls.yml`` file. - -Make sure that the names of the services match with the one in the ``compose.yml`` file. - -In ``with-tls.yml``, add the following: +If you also want to enable TLS for the new SuperNode, uncomment the definition in the +``with-tls.yml`` file: .. code-block:: yaml :caption: with-tls.yml @@ -329,17 +311,25 @@ In ``with-tls.yml``, add the following: command: - --superlink - superlink:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9096 - --isolation - process - --node-config - "partition-id=1 num-partitions=2" - --root-certificates - - certificates/ca.crt + - certificates/superlink-ca.crt secrets: - source: superlink-ca-certfile - target: /app/certificates/ca.crt + target: /app/certificates/superlink-ca.crt + +Restart the services with: + +.. 
code-block:: bash + + $ docker compose up --build -d + # or with TLS enabled + $ docker compose -f compose.yml -f with-tls.yml up --build -d Step 8: Persisting the SuperLink State and Enabling TLS ------------------------------------------------------- @@ -347,18 +337,22 @@ Step 8: Persisting the SuperLink State and Enabling TLS To run Flower with persisted SuperLink state and enabled TLS, a slight change in the ``with-state.yml`` file is required: -1. Comment out the lines 2-4 and uncomment the lines 5-9: +1. Comment out the lines 2-6 and uncomment the lines 7-13: .. code-block:: yaml :caption: with-state.yml :linenos: - :emphasize-lines: 2-9 + :emphasize-lines: 2-13 superlink: # command: - # - --insecure - # - --database=state/state.db + # - --insecure + # - --isolation + # - process + # - --database=state/state.db command: + - --isolation + - process - --ssl-ca-certfile=certificates/ca.crt - --ssl-certfile=certificates/server.pem - --ssl-keyfile=certificates/server.key @@ -376,8 +370,7 @@ To run Flower with persisted SuperLink state and enabled TLS, a slight change in .. code-block:: bash - $ flwr run quickstart-compose docker-compose-tls - $ docker compose logs superexec -f + $ flwr run quickstart-compose local-deployment-tls --stream Step 9: Merge Multiple Compose Files ------------------------------------ @@ -402,7 +395,6 @@ Remove all services and volumes: .. code-block:: bash $ docker compose down -v - $ docker compose -f certs.yml down -v Where to Go Next ---------------- diff --git a/doc/source/docker/tutorial-quickstart-docker.rst b/doc/source/docker/tutorial-quickstart-docker.rst index 993754dcf109..3bc311923f8f 100644 --- a/doc/source/docker/tutorial-quickstart-docker.rst +++ b/doc/source/docker/tutorial-quickstart-docker.rst @@ -34,7 +34,6 @@ Step 1: Set Up flwr run $ cd quickstart-docker - $ pip install -e . 2. 
Create a new Docker bridge network called ``flwr-network``: @@ -55,19 +54,23 @@ Open your terminal and run: :substitutions: $ docker run --rm \ - -p 9091:9091 -p 9092:9092 \ + -p 9091:9091 -p 9092:9092 -p 9093:9093 \ --network flwr-network \ --name superlink \ --detach \ - flwr/superlink:|stable_flwr_version| --insecure + flwr/superlink:|stable_flwr_version| \ + --insecure \ + --isolation \ + process .. dropdown:: Understand the command * ``docker run``: This tells Docker to run a container from an image. * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of - | the host machine, allowing other services to access the Driver API on - | ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. + * | ``-p 9091:9091 -p 9092:9092 -p 9093:9093``: Map port ``9091``, ``9092`` and ``9093`` of the + | container to the same port of the host machine, allowing other services to access the + | ServerAppIO API on ``http://localhost:9091``, the Fleet API on ``http://localhost:9092`` and + | the Exec API on ``http://localhost:9093``. * ``--network flwr-network``: Make the container join the network named ``flwr-network``. * ``--name superlink``: Assign the name ``superlink`` to the container. * ``--detach``: Run the container in the background, freeing up the terminal. @@ -75,9 +78,12 @@ Open your terminal and run: | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a :doc:`specific version ` of the image. * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing | unencrypted communication. + * | ``--isolation process``: Tells the SuperLink that the ServerApp is created by separate + | independent process. The SuperLink does not attempt to create it. You can learn more about + | the different process modes here: :doc:`run-as-subprocess`. 
-Step 3: Start the SuperNode ---------------------------- +Step 3: Start the SuperNodes +---------------------------- Start two SuperNode containers. @@ -95,7 +101,7 @@ Start two SuperNode containers. --insecure \ --superlink superlink:9092 \ --node-config "partition-id=0 num-partitions=2" \ - --supernode-address 0.0.0.0:9094 \ + --clientappio-api-address 0.0.0.0:9094 \ --isolation process .. dropdown:: Understand the command @@ -108,16 +114,18 @@ Start two SuperNode containers. * ``--network flwr-network``: Make the container join the network named ``flwr-network``. * ``--name supernode-1``: Assign the name ``supernode-1`` to the container. * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr/supernode:|stable_flwr_version|``: This is the name of the image to be run and the specific tag - | of the image. + * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: This is the name of the + | image to be run and the specific tag of the image. * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing | unencrypted communication. * | ``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at the address | ``superlink:9092``. * | ``--node-config "partition-id=0 num-partitions=2"``: Set the partition ID to ``0`` and the | number of partitions to ``2`` for the SuperNode configuration. - * | ``--supernode-address 0.0.0.0:9094``: Set the address and port number that the SuperNode - | is listening on. + * | ``--clientappio-api-address 0.0.0.0:9094``: Set the address and port number that the + | SuperNode is listening on to communicate with the ClientApp. If + | two SuperNodes are started on the same machine, set two different port numbers for each SuperNode. + | (E.g. In the next step, we set the second SuperNode container to listen on port 9095) * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate | independent process. 
The SuperNode does not attempt to create it. @@ -135,38 +143,38 @@ Start two SuperNode containers. --insecure \ --superlink superlink:9092 \ --node-config "partition-id=1 num-partitions=2" \ - --supernode-address 0.0.0.0:9095 \ + --clientappio-api-address 0.0.0.0:9095 \ --isolation process -Step 4: Start the ClientApp ---------------------------- +Step 4: Start a ServerApp +------------------------- -The ClientApp Docker image comes with a pre-installed version of Flower and serves as a -base for building your own ClientApp image. In order to install the FAB dependencies, -you will need to create a Dockerfile that extends the ClientApp image and installs the +The ServerApp Docker image comes with a pre-installed version of Flower and serves as a +base for building your own ServerApp image. In order to install the FAB dependencies, +you will need to create a Dockerfile that extends the ServerApp image and installs the required dependencies. -1. Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste the following - code into it: +1. Create a ServerApp Dockerfile called ``serverapp.Dockerfile`` and paste the following + code in: .. code-block:: dockerfile - :caption: Dockerfile.clientapp - :linenos: + :caption: serverapp.Dockerfile :substitutions: - FROM flwr/clientapp:|stable_flwr_version| + FROM flwr/serverapp:|stable_flwr_version| WORKDIR /app + COPY pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flwr-clientapp"] + ENTRYPOINT ["flwr-serverapp"] .. dropdown:: Understand the Dockerfile - * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/clientapp image``, version :substitution-code:`|stable_flwr_version|`. 
+ * | :substitution-code:`FROM flwr/serverapp:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/serverapp`` image, version :substitution-code:`|stable_flwr_version|`. * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. | Any subsequent commands that reference a directory will be relative to this directory. * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file @@ -178,7 +186,7 @@ required dependencies. | | The ``-U`` flag indicates that any existing packages should be upgraded, and | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. - * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be + * | ``ENTRYPOINT ["flwr-serverapp"]``: Set the command ``flwr-serverapp`` to be | the default command run when the container is started. .. important:: @@ -189,80 +197,69 @@ required dependencies. ``flwr`` dependency is removed from the ``pyproject.toml`` after it has been copied into the Docker image (see line 5). -2. Next, build the ClientApp Docker image by running the following command in the - directory where the Dockerfile is located: +2. Afterward, in the directory that holds the Dockerfile, execute this Docker command to + build the ServerApp image: .. code-block:: bash - $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . + $ docker build -f serverapp.Dockerfile -t flwr_serverapp:0.0.1 . - .. note:: - - The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. Remember - that these values are merely examples, and you can customize them according to - your requirements. - -3. Start the first ClientApp container: +3. Start the ServerApp container: .. code-block:: bash $ docker run --rm \ --network flwr-network \ + --name serverapp \ --detach \ - flwr_clientapp:0.0.1 \ - --supernode supernode-1:9094 + flwr_serverapp:0.0.1 \ + --insecure \ + --serverappio-api-address superlink:9091 .. 
dropdown:: Understand the command * ``docker run``: This tells Docker to run a container from an image. * ``--rm``: Remove the container once it is stopped or the command exits. * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name serverapp``: Assign the name ``serverapp`` to the container. * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag + * | ``flwr_serverapp:0.0.1``: This is the name of the image to be run and the specific tag | of the image. - * | ``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at the address - | ``supernode-1:9094``. - -4. Start the second ClientApp container: - - .. code-block:: shell - - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_clientapp:0.0.1 \ - --supernode supernode-2:9095 + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. Secure connections will be added in future releases. + * | ``--serverappio-api-address superlink:9091``: Connect to the SuperLink's ServerAppIO API + | at the address ``superlink:9091``. -Step 5: Start the SuperExec +Step 5: Start the ClientApp --------------------------- -The procedure for building and running a SuperExec image is almost identical to the -ClientApp image. +The procedure for building and running a ClientApp image is almost identical to the +ServerApp image. -Similar to the ClientApp image, you will need to create a Dockerfile that extends the -SuperExec image and installs the required FAB dependencies. +Similar to the ServerApp image, you will need to create a Dockerfile that extends the +ClientApp image and installs the required FAB dependencies. -1. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following - code in: +1. 
Create a ClientApp Dockerfile called ``clientapp.Dockerfile`` and paste the following + code into it: .. code-block:: dockerfile - :caption: Dockerfile.superexec + :caption: clientapp.Dockerfile + :linenos: :substitutions: - FROM flwr/superexec:|stable_flwr_version| + FROM flwr/clientapp:|stable_flwr_version| WORKDIR /app - COPY pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-superexec", "--executor", "flwr.superexec.deployment:executor"] + ENTRYPOINT ["flwr-clientapp"] .. dropdown:: Understand the Dockerfile - * | :substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/superexec image``, version :substitution-code:`|stable_flwr_version|`. + * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/clientapp`` image, version :substitution-code:`|stable_flwr_version|`. * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. | Any subsequent commands that reference a directory will be relative to this directory. * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file @@ -274,47 +271,56 @@ SuperExec image and installs the required FAB dependencies. | | The ``-U`` flag indicates that any existing packages should be upgraded, and | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. - * | ``ENTRYPOINT ["flower-superexec"``: Set the command ``flower-superexec`` to be + * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be | the default command run when the container is started. - | - | ``"--executor", "flwr.superexec.deployment:executor"]`` Use the - | ``flwr.superexec.deployment:executor`` executor to run the ServerApps. -2. 
Afterward, in the directory that holds the Dockerfile, execute this Docker command to - build the SuperExec image: +2. Next, build the ClientApp Docker image by running the following command in the + directory where the Dockerfile is located: .. code-block:: bash - $ docker build -f Dockerfile.superexec -t flwr_superexec:0.0.1 . + $ docker build -f clientapp.Dockerfile -t flwr_clientapp:0.0.1 . + + .. note:: + + The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. Remember + that these values are merely examples, and you can customize them according to + your requirements. -3. Start the SuperExec container: +3. Start the first ClientApp container: .. code-block:: bash $ docker run --rm \ - -p 9093:9093 \ --network flwr-network \ - --name superexec \ --detach \ - flwr_superexec:0.0.1 \ + flwr_clientapp:0.0.1 \ --insecure \ - --executor-config superlink=\"superlink:9091\" + --clientappio-api-address supernode-1:9094 .. dropdown:: Understand the command * ``docker run``: This tells Docker to run a container from an image. * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9093:9093``: Map port ``9093`` of the container to the same port of - | the host machine, allowing you to access the SuperExec API on ``http://localhost:9093``. * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--name superexec``: Assign the name ``superexec`` to the container. * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr_superexec:0.0.1``: This is the name of the image to be run and the specific tag - | of the image. * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. - * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to - | connect to the SuperLink running on port ``9091``. + | unencrypted communication. 
Secure connections will be added in future releases. + * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--clientappio-api-address supernode-1:9094``: Connect to the SuperNode's ClientAppIO + | API at the address ``supernode-1:9094``. + +4. Start the second ClientApp container: + + .. code-block:: shell + + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --insecure \ + --clientappio-api-address supernode-2:9095 Step 6: Run the Quickstart Project ---------------------------------- @@ -324,21 +330,16 @@ Step 6: Run the Quickstart Project .. code-block:: toml :caption: pyproject.toml - [tool.flwr.federations.docker] + [tool.flwr.federations.local-deployment] address = "127.0.0.1:9093" insecure = true -2. Run the ``quickstart-docker`` project by executing the command: - - .. code-block:: bash - - $ flwr run . docker - -3. Follow the SuperExec logs to track the execution of the run: +2. Run the ``quickstart-docker`` project and follow the ServerApp logs to track the + execution of the run: .. code-block:: bash - $ docker logs -f superexec + $ flwr run . local-deployment --stream Step 7: Update the Application ------------------------------ @@ -353,38 +354,56 @@ Step 7: Update the Application partition_train_test = partition.train_test_split(test_size=0.2, seed=43) # ... -2. Stop the current ClientApp containers: +2. Stop the current ServerApp and ClientApp containers: + + .. note:: + + If you have modified the dependencies listed in your ``pyproject.toml`` file, it + is essential to rebuild images. + + If you haven’t made any changes, you can skip steps 2 through 4. .. code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) serverapp -3. Rebuild the FAB and ClientApp image: +3. Rebuild ServerApp and ClientApp images: .. 
code-block:: bash - $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . + $ docker build -f clientapp.Dockerfile -t flwr_clientapp:0.0.1 . && \ + docker build -f serverapp.Dockerfile -t flwr_serverapp:0.0.1 . -4. Launch two new ClientApp containers based on the newly built image: +4. Launch one new ServerApp and two new ClientApp containers based on the newly built + image: .. code-block:: bash + $ docker run --rm \ + --network flwr-network \ + --name serverapp \ + --detach \ + flwr_serverapp:0.0.1 \ + --insecure \ + --serverappio-api-address superlink:9091 $ docker run --rm \ --network flwr-network \ --detach \ flwr_clientapp:0.0.1 \ - --supernode supernode-1:9094 + --insecure \ + --clientappio-api-address supernode-1:9094 $ docker run --rm \ --network flwr-network \ --detach \ flwr_clientapp:0.0.1 \ - --supernode supernode-2:9095 + --insecure \ + --clientappio-api-address supernode-2:9095 5. Run the updated project: .. code-block:: bash - $ flwr run . docker + $ flwr run . local-deployment --stream Step 8: Clean Up ---------------- @@ -396,7 +415,7 @@ Remove the containers and the bridge network: $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) \ supernode-1 \ supernode-2 \ - superexec \ + serverapp \ superlink $ docker network rm flwr-network diff --git a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst deleted file mode 100644 index 4a9d4607d9a5..000000000000 --- a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst +++ /dev/null @@ -1,120 +0,0 @@ -Example: FedBN in PyTorch - From Centralized To Federated -========================================================= - -This tutorial will show you how to use Flower to build a federated version of an -existing machine learning workload with `FedBN `_, a -federated training strategy designed for non-iid data. 
We are using PyTorch to train a -Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. -When applying FedBN, only few changes needed compared to :doc:`Example: PyTorch - From -Centralized To Federated `. - -Centralized Training --------------------- - -All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated -`. The only thing to do is modifying the -file called ``cifar.py``, revised part is shown below: - -The model architecture defined in class Net() is added with Batch Normalization layers -accordingly. - -.. code-block:: python - - class Net(nn.Module): - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.bn1 = nn.BatchNorm2d(6) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.bn2 = nn.BatchNorm2d(16) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.bn3 = nn.BatchNorm1d(120) - self.fc2 = nn.Linear(120, 84) - self.bn4 = nn.BatchNorm1d(84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: Tensor) -> Tensor: - x = self.pool(F.relu(self.bn1(self.conv1(x)))) - x = self.pool(F.relu(self.bn2(self.conv2(x)))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.bn3(self.fc1(x))) - x = F.relu(self.bn4(self.fc2(x))) - x = self.fc3(x) - return x - -You can now run your machine learning workload: - -.. code-block:: bash - - python3 cifar.py - -So far this should all look fairly familiar if you've used PyTorch before. Let's take -the next step and use what we've built to create a federated learning system within -FedBN, the system consists of one server and two clients. - -Federated Training ------------------- - -If you have read :doc:`Example: PyTorch - From Centralized To Federated -`, the following parts are easy to -follow, only ``get_parameters`` and ``set_parameters`` function in ``client.py`` needed -to revise. If not, please read the :doc:`Example: PyTorch - From Centralized To -Federated `. first. 
- -Our example consists of one *server* and two *clients*. In FedBN, ``server.py`` keeps -unchanged, we can start the server directly. - -.. code-block:: bash - - python3 server.py - -Finally, we will revise our *client* logic by changing ``get_parameters`` and -``set_parameters`` in ``client.py``, we will exclude batch normalization parameters from -model parameter list when sending to or receiving from the server. - -.. code-block:: python - - class CifarClient(fl.client.NumPyClient): - """Flower client implementing CIFAR-10 image classification using - PyTorch.""" - - ... - - def get_parameters(self, config) -> List[np.ndarray]: - # Return model parameters as a list of NumPy ndarrays, excluding parameters of BN layers when using FedBN - return [ - val.cpu().numpy() - for name, val in self.model.state_dict().items() - if "bn" not in name - ] - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - # Set model parameters from a list of NumPy ndarrays - keys = [k for k in self.model.state_dict().keys() if "bn" not in k] - params_dict = zip(keys, parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=False) - - ... - -Now, you can now open two additional terminal windows and run - -.. code-block:: bash - - python3 client.py - -in each window (make sure that the server is still running before you do so) and see -your (previously centralized) PyTorch project run federated learning with FedBN strategy -across two clients. Congratulations! - -Next Steps ----------- - -The full source code for this example can be found `here -`_. -Our example is of course somewhat over-simplified because both clients load the exact -same dataset, which isn't realistic. You're now prepared to explore this topic further. -How about using different subsets of CIFAR-10 on each client? How about adding more -clients? 
diff --git a/doc/source/example-pytorch-from-centralized-to-federated.rst b/doc/source/example-pytorch-from-centralized-to-federated.rst deleted file mode 100644 index 9629a7fed6e8..000000000000 --- a/doc/source/example-pytorch-from-centralized-to-federated.rst +++ /dev/null @@ -1,356 +0,0 @@ -Example: PyTorch - From Centralized To Federated -================================================ - -This tutorial will show you how to use Flower to build a federated version of an -existing machine learning workload. We are using PyTorch to train a Convolutional Neural -Network on the CIFAR-10 dataset. First, we introduce this machine learning task with a -centralized training approach based on the `Deep Learning with PyTorch -`_ tutorial. Then, -we build upon the centralized training code to run the training in a federated fashion. - -Centralized Training --------------------- - -We begin with a brief description of the centralized CNN training code. If you want a -more in-depth explanation of what's going on then have a look at the official `PyTorch -tutorial `_. - -Let's create a new file called ``cifar.py`` with all the components required for a -traditional (centralized) training on CIFAR-10. First, all required packages (such as -``torch`` and ``torchvision``) need to be imported. You can see that we do not import -any package for federated learning. You can keep all these imports as they are even when -we add the federated learning components at a later point. - -.. code-block:: python - - from typing import Tuple, Dict - - import torch - import torch.nn as nn - import torch.nn.functional as F - import torchvision - import torchvision.transforms as transforms - from torch import Tensor - from torchvision.datasets import CIFAR10 - -As already mentioned we will use the CIFAR-10 dataset for this machine learning -workload. The model architecture (a very simple Convolutional Neural Network) is defined -in ``class Net()``. - -.. 
code-block:: python - - class Net(nn.Module): - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: Tensor) -> Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - -The ``load_data()`` function loads the CIFAR-10 training and test sets. The -``transform`` normalized the data after loading. - -.. code-block:: python - - DATA_ROOT = "~/data/cifar-10" - - - def load_data() -> ( - Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict] - ): - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - trainset = CIFAR10(DATA_ROOT, train=True, download=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) - testset = CIFAR10(DATA_ROOT, train=False, download=True, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - num_examples = {"trainset": len(trainset), "testset": len(testset)} - return trainloader, testloader, num_examples - -We now need to define the training (function ``train()``) which loops over the training -set, measures the loss, backpropagates it, and then takes one optimizer step for each -batch of training examples. - -The evaluation of the model is defined in the function ``test()``. The function loops -over all test samples and measures the loss of the model based on the test dataset. - -.. 
code-block:: python - - def train( - net: Net, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, - ) -> None: - """Train the network.""" - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 100 == 99: # print every 100 mini-batches - print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - - - def test( - net: Net, - testloader: torch.utils.data.DataLoader, - device: torch.device, - ) -> Tuple[float, float]: - """Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - correct = 0 - total = 0 - loss = 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - accuracy = correct / total - return loss, accuracy - -Having defined the data loading, model architecture, training, and evaluation we can put -everything together and train our CNN on CIFAR-10. - -.. 
code-block:: python - - def main(): - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - print("Centralized PyTorch training") - print("Load data") - trainloader, testloader, _ = load_data() - print("Start training") - net = Net().to(DEVICE) - train(net=net, trainloader=trainloader, epochs=2, device=DEVICE) - print("Evaluate model") - loss, accuracy = test(net=net, testloader=testloader, device=DEVICE) - print("Loss: ", loss) - print("Accuracy: ", accuracy) - - - if __name__ == "__main__": - main() - -You can now run your machine learning workload: - -.. code-block:: bash - - python3 cifar.py - -So far, this should all look fairly familiar if you've used PyTorch before. Let's take -the next step and use what we've built to create a simple federated learning system -consisting of one server and two clients. - -Federated Training ------------------- - -The simple machine learning project discussed in the previous section trains the model -on a single dataset (CIFAR-10), we call this centralized learning. This concept of -centralized learning, as shown in the previous section, is probably known to most of -you, and many of you have used it previously. Normally, if you'd want to run machine -learning workloads in a federated fashion, then you'd have to change most of your code -and set everything up from scratch. This can be a considerable effort. - -However, with Flower you can evolve your pre-existing code into a federated learning -setup without the need for a major rewrite. - -The concept is easy to understand. We have to start a *server* and then use the code in -``cifar.py`` for the *clients* that are connected to the *server*. The *server* sends -model parameters to the clients. The *clients* run the training and update the -parameters. The updated parameters are sent back to the *server* which averages all -received parameter updates. This describes one round of the federated learning process -and we repeat this for multiple rounds. 
- -Our example consists of one *server* and two *clients*. Let's set up ``server.py`` -first. The *server* needs to import the Flower package ``flwr``. Next, we use the -``start_server`` function to start a server and tell it to perform three rounds of -federated learning. - -.. code-block:: python - - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3) - ) - -We can already start the *server*: - -.. code-block:: bash - - python3 server.py - -Finally, we will define our *client* logic in ``client.py`` and build upon the -previously defined centralized training in ``cifar.py``. Our *client* needs to import -``flwr``, but also ``torch`` to update the parameters on our PyTorch model: - -.. code-block:: python - - from collections import OrderedDict - from typing import Dict, List, Tuple - - import numpy as np - import torch - - import cifar - import flwr as fl - - DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -Implementing a Flower *client* basically means implementing a subclass of either -``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our implementation will be based -on ``flwr.client.NumPyClient`` and we'll call it ``CifarClient``. ``NumPyClient`` is -slightly easier to implement than ``Client`` if you use a framework with good NumPy -interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the -boilerplate that would otherwise be necessary. ``CifarClient`` needs to implement four -methods, two methods for getting/setting model parameters, one method for training the -model, and one method for testing the model: - -1. ``set_parameters`` - - set the model parameters on the local model that are received from the server - - loop over the list of model parameters received as NumPy ``ndarray``'s (think - list of neural network layers) -2. 
``get_parameters`` - - get the model parameters and return them as a list of NumPy ``ndarray``'s - (which is what ``flwr.client.NumPyClient`` expects) -3. ``fit`` - - update the parameters of the local model with the parameters received from the - server - - train the model on the local training set - - get the updated local model weights and return them to the server -4. ``evaluate`` - - update the parameters of the local model with the parameters received from the - server - - evaluate the updated model on the local test set - - return the local loss and accuracy to the server - -The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the functions -``train()`` and ``test()`` previously defined in ``cifar.py``. So what we really do here -is we tell Flower through our ``NumPyClient`` subclass which of our already defined -functions to call for training and evaluation. We included type annotations to give you -a better understanding of the data types that get passed around. - -.. 
code-block:: python - - class CifarClient(fl.client.NumPyClient): - """Flower client implementing CIFAR-10 image classification using - PyTorch.""" - - def __init__( - self, - model: cifar.Net, - trainloader: torch.utils.data.DataLoader, - testloader: torch.utils.data.DataLoader, - num_examples: Dict, - ) -> None: - self.model = model - self.trainloader = trainloader - self.testloader = testloader - self.num_examples = num_examples - - def get_parameters(self, config) -> List[np.ndarray]: - # Return model parameters as a list of NumPy ndarrays - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - # Set model parameters from a list of NumPy ndarrays - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return updated model parameters - self.set_parameters(parameters) - cifar.train(self.model, self.trainloader, epochs=1, device=DEVICE) - return self.get_parameters(config={}), self.num_examples["trainset"], {} - - def evaluate( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[float, int, Dict]: - # Set model parameters, evaluate model on local test dataset, return result - self.set_parameters(parameters) - loss, accuracy = cifar.test(self.model, self.testloader, device=DEVICE) - return float(loss), self.num_examples["testset"], {"accuracy": float(accuracy)} - -All that's left to do it to define a function that loads both model and data, creates a -``CifarClient``, and starts this client. You load your data and model by using -``cifar.py``. 
Start ``CifarClient`` with the function ``fl.client.start_client()`` by -pointing it at the same IP address we used in ``server.py``: - -.. code-block:: python - - def main() -> None: - """Load data, start CifarClient.""" - - # Load model and data - model = cifar.Net() - model.to(DEVICE) - trainloader, testloader, num_examples = cifar.load_data() - - # Start client - client = CifarClient(model, trainloader, testloader, num_examples) - fl.client.start_client(server_address="0.0.0.0:8080", client.to_client()) - - - if __name__ == "__main__": - main() - -And that's it. You can now open two additional terminal windows and run - -.. code-block:: bash - - python3 client.py - -in each window (make sure that the server is running before you do so) and see your -(previously centralized) PyTorch project run federated learning across two clients. -Congratulations! - -Next Steps ----------- - -The full source code for this example: `PyTorch: From Centralized To Federated (Code) -`_. -Our example is, of course, somewhat over-simplified because both clients load the exact -same dataset, which isn't realistic. You're now prepared to explore this topic further. -How about using different subsets of CIFAR-10 on each client? How about adding more -clients? diff --git a/doc/source/explanation-federated-evaluation.rst b/doc/source/explanation-federated-evaluation.rst index c56a5d48b2f6..376219c777ff 100644 --- a/doc/source/explanation-federated-evaluation.rst +++ b/doc/source/explanation-federated-evaluation.rst @@ -16,7 +16,9 @@ current global model parameters as input and return evaluation results: .. 
code-block:: python - from flwr.common import NDArrays, Scalar + from flwr.common import Context, NDArrays, Scalar + from flwr.server import ServerApp, ServerAppComponents, ServerConfig + from flwr.server.strategy import FedAvg from typing import Dict, Optional, Tuple @@ -41,21 +43,28 @@ current global model parameters as input and return evaluation results: return evaluate - # Load and compile model for server-side parameter evaluation - model = tf.keras.applications.EfficientNetB0( - input_shape=(32, 32, 3), weights=None, classes=10 - ) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + # Load and compile model for server-side parameter evaluation + model = tf.keras.applications.EfficientNetB0( + input_shape=(32, 32, 3), weights=None, classes=10 + ) + model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + + # Create strategy + strategy = FedAvg( + # ... other FedAvg arguments + evaluate_fn=get_evaluate_fn(model), + ) + + return ServerAppComponents(strategy=strategy, config=config) - # Create strategy - strategy = fl.server.strategy.FedAvg( - # ... other FedAvg arguments - evaluate_fn=get_evaluate_fn(model), - ) - # Start Flower server for four rounds of federated learning - fl.server.start_server(server_address="[::]:8080", strategy=strategy) + # Create ServerApp + app = ServerApp(server_fn=server_fn) Custom Strategies ~~~~~~~~~~~~~~~~~ @@ -76,16 +85,15 @@ from the server side. .. code-block:: python - class CifarClient(fl.client.NumPyClient): + from flwr.client import NumPyClient + + + class FlowerClient(NumPyClient): def __init__(self, model, x_train, y_train, x_test, y_test): self.model = model self.x_train, self.y_train = x_train, y_train self.x_test, self.y_test = x_test, y_test - def get_parameters(self, config): - # ... 
- pass - def fit(self, parameters, config): # ... pass @@ -131,6 +139,11 @@ the following arguments: .. code-block:: python + from flwr.common import Context + from flwr.server import ServerApp, ServerAppComponents, ServerConfig + from flwr.server.strategy import FedAvg + + def evaluate_config(server_round: int): """Return evaluation configuration dict for each round. Perform five local evaluation steps on each client (i.e., use five @@ -142,7 +155,7 @@ the following arguments: # Create strategy - strategy = fl.server.strategy.FedAvg( + strategy = FedAvg( # ... other FedAvg arguments fraction_evaluate=0.2, min_evaluate_clients=2, @@ -150,8 +163,15 @@ the following arguments: on_evaluate_config_fn=evaluate_config, ) - # Start Flower server for four rounds of federated learning - fl.server.start_server(server_address="[::]:8080", strategy=strategy) + + def server_fn(context: Context): + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + return ServerAppComponents(strategy=strategy, config=config) + + + # Create ServerApp + app = ServerApp(server_fn=server_fn) Evaluating Local Model Updates During Training ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -161,16 +181,15 @@ arbitrary evaluation results as a dictionary: .. code-block:: python - class CifarClient(fl.client.NumPyClient): + from flwr.client import NumPyClient + + + class FlowerClient(NumPyClient): def __init__(self, model, x_train, y_train, x_test, y_test): self.model = model self.x_train, self.y_train = x_train, y_train self.x_test, self.y_test = x_test, y_test - def get_parameters(self, config): - # ... 
- pass - def fit(self, parameters, config): """Train parameters on the locally held training set.""" @@ -201,6 +220,6 @@ Full Code Example ----------------- For a full code example that uses both centralized and federated evaluation, see the -*Advanced TensorFlow Example* (the same approach can be applied to workloads implemented -in any other framework): -https://github.com/adap/flower/tree/main/examples/advanced-tensorflow +`Advanced TensorFlow Example +`_ (the same +approach can be applied to workloads implemented in any other framework). diff --git a/doc/source/explanation-flower-architecture.rst b/doc/source/explanation-flower-architecture.rst index e82da56dcefa..e7da17514732 100644 --- a/doc/source/explanation-flower-architecture.rst +++ b/doc/source/explanation-flower-architecture.rst @@ -118,24 +118,6 @@ in the training: Therefore, with Flower multi-run, different projects (each consisting of a ``ServerApp`` and ``ClientApp``) can run on different sets of clients. -To help you start and manage all of the concurrently executing training runs, Flower -offers one additional long-running server-side service called **SuperExec**. When you -type ``flwr run`` to start a new training run, the ``flwr`` CLI bundles your local -project (mainly your ``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. -The **SuperExec** will then take care of starting and managing your ``ServerApp``, which -in turn selects SuperNodes to execute your ``ClientApp``. - -This architecture allows many users to (concurrently) run their projects on the same -federation, simply by typing ``flwr run`` on their local developer machine. - -.. figure:: ./_static/flower-architecture-deployment-engine.svg - :align: center - :width: 800 - :alt: Flower Deployment Engine with SuperExec - :class: no-scaled-link - - The SuperExec service for managing concurrent training runs in Flower. - .. note:: This explanation covers the Flower Deployment Engine. 
An explanation covering the diff --git a/doc/source/fed/0000-20200102-fed-template.md b/doc/source/fed/0000-20200102-fed-template.md deleted file mode 100644 index 39031c4520f6..000000000000 --- a/doc/source/fed/0000-20200102-fed-template.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -fed-number: 0000 -title: FED Template -authors: ['@adap'] -creation-date: 2020-01-02 -last-updated: 2020-01-02 -status: provisional ---- - -# FED Template - -## Table of Contents - -- [Table of Contents](#table-of-contents) -- [Summary](#summary) -- [Motivation](#motivation) - - [Goals](#goals) - - [Non-Goals](#non-goals) -- [Proposal](#proposal) -- [Drawbacks](#drawbacks) -- [Alternatives Considered](#alternatives-considered) -- [Appendix](#appendix) - -## Summary - -\[TODO - sentence 1: summary of the problem\] - -\[TODO - sentence 2: summary of the solution\] - -## Motivation - -\[TODO\] - -### Goals - -\[TODO\] - -### Non-Goals - -\[TODO\] - -## Proposal - -\[TODO\] - -## Drawbacks - -\[TODO\] - -## Alternatives Considered - -### \[Alternative 1\] - -\[TODO\] - -### \[Alternative 2\] - -\[TODO\] - -## Appendix diff --git a/doc/source/fed/0001-20220311-flower-enhancement-doc.md b/doc/source/fed/0001-20220311-flower-enhancement-doc.md deleted file mode 100644 index 037142e36f8a..000000000000 --- a/doc/source/fed/0001-20220311-flower-enhancement-doc.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -fed-number: 0001 -title: Flower Enhancement Doc -authors: ['@nfnt', '@orlandohohmeier'] -creation-date: 2022-03-11 -last-updated: 2022-12-12 -status: provisional ---- - -# Flower Enhancement Doc - -## Table of Contents - -- [Table of Contents](#table-of-contents) -- [Summary](#summary) -- [Motivation](#motivation) - - [Goals](#goals) - - [Non-Goals](#non-goals) -- [Proposal](#proposal) - - [Enhancement Doc Template](#enhancement-doc-template) - - [Metadata](#metadata) - - [Workflow](#workflow) -- [Drawbacks](#drawbacks) -- [Alternatives Considered](#alternatives-considered) - - [GitHub 
Issues](#github-issues) - - [Google Docs](#google-docs) - -## Summary - -A Flower Enhancement is a standardized development process to - -- provide a common structure for proposing larger changes -- ensure that the motivation for a change is clear -- persist project information in a version control system -- document the motivation for impactful user-facing changes -- reserve GitHub issues for tracking work in flight -- ensure community participants can successfully drive changes to completion across one or more releases while stakeholders are adequately represented throughout the process - -Hence, an Enhancement Doc combines aspects of - -- a feature, and effort-tracking document -- a product requirements document -- a design document - -into one file, which is created incrementally in collaboration with the community. - -## Motivation - -For far-fetching changes or features proposed to Flower, an abstraction beyond a single GitHub issue or pull request is required to understand and communicate upcoming changes to the project. - -The purpose of this process is to reduce the amount of "tribal knowledge" in our community. By moving decisions from Slack threads, video calls, and hallway conversations into a well-tracked artifact, this process aims to enhance communication and discoverability. - -### Goals - -Roughly any larger, user-facing enhancement should follow the Enhancement process. If an enhancement would be described in either written or verbal communication to anyone besides the author or developer, then consider creating an Enhancement Doc. - -Similarly, any technical effort (refactoring, major architectural change) that will impact a large section of the development community should also be communicated widely. The Enhancement process is suited for this even if it will have zero impact on the typical user or operator. - -### Non-Goals - -For small changes and additions, going through the Enhancement process would be time-consuming and unnecessary. 
This includes, for example, adding new Federated Learning algorithms, as these only add features without changing how Flower works or is used. - -Enhancements are different from feature requests, as they are already providing a laid-out path for implementation and are championed by members of the community. - -## Proposal - -An Enhancement is captured in a Markdown file that follows a defined template and a workflow to review and store enhancement docs for reference — the Enhancement Doc. - -### Enhancement Doc Template - -Each enhancement doc is provided as a Markdown file having the following structure - -- Metadata (as [described below](#metadata) in form of a YAML preamble) -- Title (same as in metadata) -- Table of Contents (if needed) -- Summary -- Motivation - - Goals - - Non-Goals -- Proposal - - Notes/Constraints/Caveats (optional) -- Design Details (optional) - - Graduation Criteria - - Upgrade/Downgrade Strategy (if applicable) -- Drawbacks -- Alternatives Considered - -As a reference, this document follows the above structure. - -### Metadata - -- **fed-number** (Required) - The `fed-number` of the last Flower Enhancement Doc + 1. With this number, it becomes easy to reference other proposals. -- **title** (Required) - The title of the proposal in plain language. -- **status** (Required) - The current status of the proposal. See [workflow](#workflow) for the possible states. -- **authors** (Required) - A list of authors of the proposal. This is simply the GitHub ID. -- **creation-date** (Required) - The date that the proposal was first submitted in a PR. -- **last-updated** (Optional) - The date that the proposal was last changed significantly. -- **see-also** (Optional) - A list of other proposals that are relevant to this one. -- **replaces** (Optional) - A list of proposals that this one replaces. -- **superseded-by** (Optional) - A list of proposals that this one supersedes. 
- -### Workflow - -The idea forming the enhancement should already have been discussed or pitched in the community. As such, it needs a champion, usually the author, who shepherds the enhancement. This person also has to find committers to Flower willing to review the proposal. - -New enhancements are checked in with a file name in the form of `NNNN-YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement Doc number, to `enhancements`. All enhancements start in `provisional` state as part of a pull request. Discussions are done as part of the pull request review. - -Once an enhancement has been reviewed and approved, its status is changed to `implementable`. The actual implementation is then done in separate pull requests. These pull requests should mention the respective enhancement as part of their description. After the implementation is done, the proposal status is changed to `implemented`. - -Under certain conditions, other states are possible. An Enhancement has the following states: - -- `provisional`: The enhancement has been proposed and is actively being defined. This is the starting state while the proposal is being fleshed out and actively defined and discussed. -- `implementable`: The enhancement has been reviewed and approved. -- `implemented`: The enhancement has been implemented and is no longer actively changed. -- `deferred`: The enhancement is proposed but not actively being worked on. -- `rejected`: The authors and reviewers have decided that this enhancement is not moving forward. -- `withdrawn`: The authors have withdrawn the enhancement. -- `replaced`: The enhancement has been replaced by a new enhancement. - -## Drawbacks - -Adding an additional process to the ones already provided by GitHub (Issues and Pull Requests) adds more complexity and can be a barrier for potential first-time contributors. 
- -Expanding the proposal template beyond the single-sentence description currently required in the features issue template may be a heavy burden for non-native English speakers. - -## Alternatives Considered - -### GitHub Issues - -Using GitHub Issues for these kinds of enhancements is doable. One could use, for example, tags, to differentiate and filter them from other issues. The main issue is in discussing and reviewing an enhancement: GitHub issues only have a single thread for comments. Enhancements usually have multiple threads of discussion at the same time for various parts of the doc. Managing these multiple discussions can be confusing when using GitHub Issues. - -### Google Docs - -Google Docs allow for multiple threads of discussions. But as Google Docs are hosted outside the project, their discoverability by the community needs to be taken care of. A list of links to all proposals has to be managed and made available for the community. Compared to shipping proposals as part of Flower's repository, the potential for missing links is much higher. diff --git a/doc/source/fed/index.md b/doc/source/fed/index.md deleted file mode 100644 index 4f680d9367cc..000000000000 --- a/doc/source/fed/index.md +++ /dev/null @@ -1,9 +0,0 @@ -# FED - Flower Enhancement Doc - -```{toctree} ---- -maxdepth: 1 ---- -0000-20200102-fed-template.md -0001-20220311-flower-enhancement-doc -``` diff --git a/doc/source/how-to-aggregate-evaluation-results.rst b/doc/source/how-to-aggregate-evaluation-results.rst index be6e20068c88..578100d7d537 100644 --- a/doc/source/how-to-aggregate-evaluation-results.rst +++ b/doc/source/how-to-aggregate-evaluation-results.rst @@ -13,11 +13,10 @@ by returning a dictionary: .. code-block:: python - class CifarClient(fl.client.NumPyClient): + from flwr.client import NumPyClient - def get_parameters(self, config): - # ... - pass + + class FlowerClient(NumPyClient): def fit(self, parameters, config): # ... 
@@ -34,14 +33,17 @@ by returning a dictionary: # Return results, including the custom accuracy metric num_examples_test = len(self.x_test) - return loss, num_examples_test, {"accuracy": accuracy} + return float(loss), num_examples_test, {"accuracy": float(accuracy)} The server can then use a customized strategy to aggregate the metrics provided in these dictionaries: .. code-block:: python - class AggregateCustomMetricStrategy(fl.server.strategy.FedAvg): + from flwr.server.strategy import FedAvg + + + class AggregateCustomMetricStrategy(FedAvg): def aggregate_evaluate( self, server_round: int, @@ -69,11 +71,24 @@ dictionaries: ) # Return aggregated loss and metrics (i.e., aggregated accuracy) - return aggregated_loss, {"accuracy": aggregated_accuracy} + return float(aggregated_loss), {"accuracy": float(aggregated_accuracy)} + + + def server_fn(context: Context) -> ServerAppComponents: + # Read federation rounds from config + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + # Define strategy + strategy = AggregateCustomMetricStrategy( + # (same arguments as FedAvg here) + ) + + return ServerAppComponents( + config=config, + strategy=strategy, # <-- pass the custom strategy here + ) - # Create strategy and run server - strategy = AggregateCustomMetricStrategy( - # (same arguments as FedAvg here) - ) - fl.server.start_server(strategy=strategy) + # Create ServerApp + app = ServerApp(server_fn=server_fn) diff --git a/doc/source/how-to-configure-clients.rst b/doc/source/how-to-configure-clients.rst index c950ab3be9e7..e5d20b9a851f 100644 --- a/doc/source/how-to-configure-clients.rst +++ b/doc/source/how-to-configure-clients.rst @@ -1,143 +1,291 @@ -Configure clients +Configure Clients ================= -Along with model parameters, Flower can send configuration values to clients. -Configuration values can be used for various purposes. 
They are, for example, a popular -way to control client-side hyperparameters from the server. +Flower provides the ability to send configuration values to clients, allowing +server-side control over client behavior. This feature enables flexible and dynamic +adjustment of client-side hyperparameters, improving collaboration and experimentation. Configuration values -------------------- -Configuration values are represented as a dictionary with ``str`` keys and values of -type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or -equivalent types in different languages). Here is an example of a configuration -dictionary in Python: +``FitConfig`` and ``EvaluateConfig`` are dictionaries containing configuration values +that the server sends to clients during federated learning rounds. These values must be +of type ``Scalar``, which includes ``bool``, ``bytes``, ``float``, ``int``, or ``str`` +(or equivalent types in different languages). Scalar is the value type directly +supported by Flower for these configurations. + +For example, a ``FitConfig`` dictionary might look like this: .. code-block:: python - config_dict = { - "dropout": True, # str key, bool value - "learning_rate": 0.01, # str key, float value - "batch_size": 32, # str key, int value - "optimizer": "sgd", # str key, str value + config = { + "batch_size": 32, # int value + "learning_rate": 0.01, # float value + "optimizer": "sgd", # str value + "dropout": True, # bool value } -Flower serializes these configuration dictionaries (or *config dict* for short) to their -ProtoBuf representation, transports them to the client using gRPC, and then deserializes -them back to Python dictionaries. +Flower serializes these configuration dictionaries (or *config dicts* for short) to +their ProtoBuf representation, transports them to the client using gRPC, and then +deserializes them back to Python dictionaries. .. 
note:: Currently, there is no support for directly sending collection types (e.g., ``Set``, - ``List``, ``Map``) as values in configuration dictionaries. There are several - workarounds to send collections as values by converting them to one of the supported - value types (and converting them back on the client-side). + ``List``, ``Map``) as values in configuration dictionaries. To send collections, + convert them to a supported type (e.g., JSON string) and decode on the client side. + + Example: + + .. code-block:: python + + import json + + # On the server + config_dict = {"data_splits": json.dumps([0.8, 0.1, 0.1])} - One can, for example, convert a list of floating-point numbers to a JSON string, - then send the JSON string using the configuration dictionary, and then convert the - JSON string back to a list of floating-point numbers on the client. + # On the client + data_splits = json.loads(config["data_splits"]) -Configuration through built-in strategies +Configuration through Built-in Strategies ----------------------------------------- -The easiest way to send configuration values to clients is to use a built-in strategy -like ``FedAvg``. Built-in strategies support so-called configuration functions. A -configuration function is a function that the built-in strategy calls to get the -configuration dictionary for the current round. It then forwards the configuration -dictionary to all the clients selected during that round. +Flower provides configuration options to control client behavior dynamically through +``FitConfig`` and ``EvaluateConfig``. These configurations allow server-side control +over client-side parameters such as batch size, number of local epochs, learning rate, +and evaluation settings, improving collaboration and experimentation. -Let's start with a simple example. 
Imagine we want to send (a) the batch size that the -client should use, (b) the current global round of federated learning, and (c) the -number of epochs to train on the client-side. Our configuration function could look like -this: +``FitConfig`` and ``EvaluateConfig`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``FitConfig`` and ``EvaluateConfig`` are dictionaries containing configuration values +that the server sends to clients during federated learning rounds. These dictionaries +enable the server to adjust client-side hyperparameters and monitor progress +effectively. + +``FitConfig`` ++++++++++++++ + +``FitConfig`` specifies the hyperparameters for training rounds, such as the batch size, +number of local epochs, and other parameters that influence training. + +For example, a ``fit_config`` callback might look like this: .. code-block:: python + import json + + def fit_config(server_round: int): - """Return training configuration dict for each round.""" + """Generate training configuration for each round.""" + # Create the configuration dictionary config = { "batch_size": 32, "current_round": server_round, "local_epochs": 2, + "data_splits": json.dumps([0.8, 0.1, 0.1]), # Example of serialized list } return config -To make the built-in strategies use this function, we can pass it to ``FedAvg`` during -initialization using the parameter ``on_fit_config_fn``: +You can then pass this ``fit_config`` callback to a built-in strategy such as +``FedAvg``: .. code-block:: python + from flwr.server.strategy import FedAvg + strategy = FedAvg( - ..., # Other FedAvg parameters - on_fit_config_fn=fit_config, # The fit_config function we defined earlier + on_fit_config_fn=fit_config, # Pass the `fit_config` function ) -One the client side, we receive the configuration dictionary in ``fit``: +On the client side, the configuration is received in the ``fit`` method, where it can be +read and used: .. 
code-block:: python - class FlowerClient(flwr.client.NumPyClient): - def fit(parameters, config): - print(config["batch_size"]) # Prints `32` - print(config["current_round"]) # Prints `1`/`2`/`...` - print(config["local_epochs"]) # Prints `2` - # ... (rest of `fit` method) - -There is also an `on_evaluate_config_fn` to configure evaluation, which works the same -way. They are separate functions because one might want to send different configuration -values to `evaluate` (for example, to use a different batch size). - -The built-in strategies call this function every round (that is, every time -`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling -`on_evaluate_config_fn` every round allows us to vary/change the config dict over -consecutive rounds. If we wanted to implement a hyperparameter schedule, for example, to -increase the number of local epochs during later rounds, we could do the following: + import json + + from flwr.client import NumPyClient + + + class FlowerClient(NumPyClient): + def fit(self, parameters, config): + # Read configuration values + batch_size = config["batch_size"] + local_epochs = config["local_epochs"] + data_splits = json.loads(config["data_splits"]) # Deserialize JSON + + # Use configuration values + print(f"Training with batch size {batch_size}, epochs {local_epochs}") + print(f"Data splits: {data_splits}") + # Training logic here + +``EvaluateConfig`` +++++++++++++++++++ + +``EvaluateConfig`` specifies hyperparameters for the evaluation process, such as the +batch size, evaluation frequency, or metrics to compute during evaluation. + +For example, an ``evaluate_config`` callback might look like this: .. 
code-block:: python - def fit_config(server_round: int): - """Return training configuration dict for each round.""" + def evaluate_config(server_round: int): + """Generate evaluation configuration for each round.""" + # Create the configuration dictionary config = { - "batch_size": 32, + "batch_size": 64, "current_round": server_round, - "local_epochs": 1 if server_round < 2 else 2, + "metrics": ["accuracy"], # Example metrics to compute } return config -The ``FedAvg`` strategy will call this function *every round*. +You can pass this ``evaluate_config`` callback to a built-in strategy like ``FedAvg``: + +.. code-block:: python + + strategy = FedAvg( + on_evaluate_config_fn=evaluate_config # Assign the evaluate_config function + ) + +On the client side, the configuration is received in the ``evaluate`` method, where it +can be used during the evaluation process: + +.. code-block:: python + + from flwr.client import NumPyClient + + + class FlowerClient(NumPyClient): + def evaluate(self, parameters, config): + # Read configuration values + batch_size = config["batch_size"] + current_round = config["current_round"] + metrics = config["metrics"] + + # Use configuration values + print(f"Evaluating with batch size {batch_size}") + print(f"Metrics to compute: {metrics}") + + # Evaluation logic here -Configuring individual clients ------------------------------- + return 0.5, {"accuracy": 0.85} # Example return values -In some cases, it is necessary to send different configuration values to different -clients. +Example: Sending Training Configurations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This can be achieved by customizing an existing strategy or by :doc:`implementing a -custom strategy from scratch `. 
Here's a nonsensical -example that customizes ``FedAvg`` by adding a custom ``"hello": "world"`` configuration -key/value pair to the config dict of a *single client* (only the first client in the -list, the other clients in this round to not receive this "special" config value): +Imagine we want to send (a) the batch size, (b) the current global round, and (c) the +number of local epochs. Our configuration function could look like this: .. code-block:: python - class CustomClientConfigStrategy(fl.server.strategy.FedAvg): - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + def fit_config(server_round: int): + """Generate training configuration for each round.""" + return { + "batch_size": 32, + "current_round": server_round, + "local_epochs": 2, + } + +To use this function with a built-in strategy like ``FedAvg``, pass it to the ``FedAvg`` +constructor (typically in your ``server_fn``): + +.. code-block:: python + + from flwr.server import ServerApp, ServerAppComponents + from flwr.server.strategy import FedAvg + + + def server_fn(context): + """Define server behavior.""" + strategy = FedAvg( + on_fit_config_fn=fit_config, + # Other arguments... + ) + return ServerAppComponents(strategy=strategy, ...) + + + app = ServerApp(server_fn=server_fn) + +Client-Side Configuration ++++++++++++++++++++++++++ + +On the client side, configurations are received as input to the ``fit`` and ``evaluate`` +methods. For example: + +.. 
code-block:: python + + class FlowerClient(flwr.client.NumPyClient): + def fit(self, parameters, config): + print(config["batch_size"]) # Output: 32 + print(config["current_round"]) # Output: current round number + print(config["local_epochs"]) # Output: 2 + # Training logic here + + def evaluate(self, parameters, config): + # Handle evaluation configurations if needed + pass + +Dynamic Configurations per Round +++++++++++++++++++++++++++++++++ + +Configuration functions are called at the beginning of every round. This allows for +dynamic adjustments based on progress. For example, you can increase the number of local +epochs in later rounds: + +.. code-block:: python + + def fit_config(server_round: int): + """Dynamic configuration for training.""" + return { + "batch_size": 32, + "current_round": server_round, + "local_epochs": 1 if server_round < 3 else 2, + } + +Customizing Client Configurations +--------------------------------- + +In some cases, it may be necessary to send different configurations to individual +clients. To achieve this, you can create a custom strategy by extending a built-in one, +such as ``FedAvg``: + +Example: Client-Specific Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
code-block:: python + + from flwr.server.strategy import FedAvg + + + class CustomClientConfigStrategy(FedAvg): + def configure_fit(self, server_round, parameters, client_manager): client_instructions = super().configure_fit( server_round, parameters, client_manager ) - # Add special "hello": "world" config key/value pair, - # but only to the first client in the list - _, fit_ins = client_instructions[0] # First (ClientProxy, FitIns) pair - fit_ins.config["hello"] = "world" # Change config for this client only + # Modify configuration for a specific client + client_proxy, fit_ins = client_instructions[0] + fit_ins.config["special_key"] = "special_value" return client_instructions +Next, use this custom strategy as usual: - # Create strategy and run server - strategy = CustomClientConfigStrategy( - # ... (same arguments as plain FedAvg here) - ) - fl.server.start_server(strategy=strategy) +.. code-block:: python + + def server_fn(context): + strategy = CustomClientConfigStrategy( + # Other FedAvg parameters + ) + return ServerAppComponents(strategy=strategy, ...) + + + app = ServerApp(server_fn=server_fn) + +Summary of Enhancements +----------------------- + +- **Dynamic Configurations**: Enables per-round adjustments via functions. +- **Advanced Customization**: Supports client-specific strategies. +- **Client-Side Integration**: Configurations accessible in ``fit`` and ``evaluate``. diff --git a/doc/source/how-to-configure-logging.rst b/doc/source/how-to-configure-logging.rst deleted file mode 100644 index bb7461390b42..000000000000 --- a/doc/source/how-to-configure-logging.rst +++ /dev/null @@ -1,148 +0,0 @@ -Configure logging -================= - -The Flower logger keeps track of all core events that take place in federated learning -workloads. It presents information by default following a standard message format: - -.. 
code-block:: python - - DEFAULT_FORMATTER = logging.Formatter( - "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" - ) - -containing relevant information including: log message level (e.g. ``INFO``, ``DEBUG``), -a timestamp, the line where the logging took place from, as well as the log message -itself. In this way, the logger would typically display information on your terminal as -follows: - -.. code-block:: bash - - ... - INFO flwr 2023-07-15 15:32:30,935 | server.py:125 | fit progress: (3, 392.5575705766678, {'accuracy': 0.2898}, 13.781953627998519) - DEBUG flwr 2023-07-15 15:32:30,935 | server.py:173 | evaluate_round 3: strategy sampled 25 clients (out of 100) - DEBUG flwr 2023-07-15 15:32:31,388 | server.py:187 | evaluate_round 3 received 25 results and 0 failures - DEBUG flwr 2023-07-15 15:32:31,388 | server.py:222 | fit_round 4: strategy sampled 10 clients (out of 100) - DEBUG flwr 2023-07-15 15:32:32,429 | server.py:236 | fit_round 4 received 10 results and 0 failures - INFO flwr 2023-07-15 15:32:33,516 | server.py:125 | fit progress: (4, 370.3378576040268, {'accuracy': 0.3294}, 16.36216809399957) - DEBUG flwr 2023-07-15 15:32:33,516 | server.py:173 | evaluate_round 4: strategy sampled 25 clients (out of 100) - DEBUG flwr 2023-07-15 15:32:33,966 | server.py:187 | evaluate_round 4 received 25 results and 0 failures - DEBUG flwr 2023-07-15 15:32:33,966 | server.py:222 | fit_round 5: strategy sampled 10 clients (out of 100) - DEBUG flwr 2023-07-15 15:32:34,997 | server.py:236 | fit_round 5 received 10 results and 0 failures - INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) - ... - -Saving log to file ------------------- - -By default, the Flower log is outputted to the terminal where you launch your Federated -Learning workload from. This applies for both gRPC-based federation (i.e. 
when you do -``fl.server.start_server``) and when using the ``VirtualClientEngine`` (i.e. when you do -``fl.simulation.start_simulation``). In some situations you might want to save this log -to disk. You can do so by calling the `fl.common.logger.configure() -`_ function. For -example: - -.. code-block:: python - - import flwr as fl - - ... - - # in your main file and before launching your experiment - # add an identifier to your logger - # then specify the name of the file where the log should be outputted to - fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") - - # then start your workload - fl.simulation.start_simulation(...) # or fl.server.start_server(...) - -With the above, Flower will record the log you see on your terminal to ``log.txt``. This -file will be created in the same directory as were you are running the code from. If we -inspect we see the log above is also recorded but prefixing with ``identifier`` each -line: - -.. code-block:: bash - - ... 
- myFlowerExperiment | INFO flwr 2023-07-15 15:32:30,935 | server.py:125 | fit progress: (3, 392.5575705766678, {'accuracy': 0.2898}, 13.781953627998519) - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:30,935 | server.py:173 | evaluate_round 3: strategy sampled 25 clients (out of 100) - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:31,388 | server.py:187 | evaluate_round 3 received 25 results and 0 failures - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:31,388 | server.py:222 | fit_round 4: strategy sampled 10 clients (out of 100) - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:32,429 | server.py:236 | fit_round 4 received 10 results and 0 failures - myFlowerExperiment | INFO flwr 2023-07-15 15:32:33,516 | server.py:125 | fit progress: (4, 370.3378576040268, {'accuracy': 0.3294}, 16.36216809399957) - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:33,516 | server.py:173 | evaluate_round 4: strategy sampled 25 clients (out of 100) - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:33,966 | server.py:187 | evaluate_round 4 received 25 results and 0 failures - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:33,966 | server.py:222 | fit_round 5: strategy sampled 10 clients (out of 100) - myFlowerExperiment | DEBUG flwr 2023-07-15 15:32:34,997 | server.py:236 | fit_round 5 received 10 results and 0 failures - myFlowerExperiment | INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) - ... - -Log your own messages ---------------------- - -You might expand the information shown by default with the Flower logger by adding more -messages relevant to your application. You can achieve this easily as follows. - -.. 
code-block:: python - - # in the python file you want to add custom messages to the Flower log - from logging import INFO, DEBUG - from flwr.common.logger import log - - # For example, let's say you want to add to the log some info about the training on your client for debugging purposes - - - class FlowerClient(fl.client.NumPyClient): - def __init__( - self, - cid: int, - # ... - ): - self.cid = cid - self.net = net - # ... - - def fit(self, parameters, config): - log(INFO, f"Printing a custom INFO message at the start of fit() :)") - - set_params(self.net, parameters) - - log(DEBUG, f"Client {self.cid} is doing fit() with config: {config}") - - # ... - -In this way your logger will show, in addition to the default messages, the ones -introduced by the clients as specified above. - -.. code-block:: bash - - ... - INFO flwr 2023-07-15 16:18:21,726 | server.py:89 | Initializing global parameters - INFO flwr 2023-07-15 16:18:21,726 | server.py:276 | Requesting initial parameters from one random client - INFO flwr 2023-07-15 16:18:22,511 | server.py:280 | Received initial parameters from one random client - INFO flwr 2023-07-15 16:18:22,511 | server.py:91 | Evaluating initial parameters - INFO flwr 2023-07-15 16:18:25,200 | server.py:94 | initial parameters (loss, other metrics): 461.2934241294861, {'accuracy': 0.0998} - INFO flwr 2023-07-15 16:18:25,200 | server.py:104 | FL starting - DEBUG flwr 2023-07-15 16:18:25,200 | server.py:222 | fit_round 1: strategy sampled 10 clients (out of 100) - INFO flwr 2023-07-15 16:18:26,391 | main.py:64 | Printing a custom INFO message :) - DEBUG flwr 2023-07-15 16:18:26,391 | main.py:63 | Client 44 is doing fit() with config: {'epochs': 5, 'batch_size': 64} - INFO flwr 2023-07-15 16:18:26,391 | main.py:64 | Printing a custom INFO message :) - DEBUG flwr 2023-07-15 16:18:28,464 | main.py:63 | Client 99 is doing fit() with config: {'epochs': 5, 'batch_size': 64} - INFO flwr 2023-07-15 16:18:28,465 | main.py:64 | Printing a custom 
INFO message :) - DEBUG flwr 2023-07-15 16:18:28,519 | main.py:63 | Client 67 is doing fit() with config: {'epochs': 5, 'batch_size': 64} - INFO flwr 2023-07-15 16:18:28,519 | main.py:64 | Printing a custom INFO message :) - DEBUG flwr 2023-07-15 16:18:28,615 | main.py:63 | Client 11 is doing fit() with config: {'epochs': 5, 'batch_size': 64} - INFO flwr 2023-07-15 16:18:28,615 | main.py:64 | Printing a custom INFO message :) - DEBUG flwr 2023-07-15 16:18:28,617 | main.py:63 | Client 13 is doing fit() with config: {'epochs': 5, 'batch_size': 64} - ... - -Log to a remote service ------------------------ - -The ``fl.common.logger.configure`` function, also allows specifying a host to which logs -can be pushed (via ``POST``) through a native Python ``logging.handler.HTTPHandler``. -This is a particularly useful feature in ``gRPC``-based Federated Learning workloads -where otherwise gathering logs from all entities (i.e. the server and the clients) might -be cumbersome. Note that in Flower simulation, the server automatically displays all -logs. You can still specify a ``HTTPHandler`` should you wish to backup or analyze the -logs somewhere else. diff --git a/doc/source/how-to-design-stateful-clients.rst b/doc/source/how-to-design-stateful-clients.rst new file mode 100644 index 000000000000..8e3fe8c09b45 --- /dev/null +++ b/doc/source/how-to-design-stateful-clients.rst @@ -0,0 +1,300 @@ +Design stateful ClientApps +========================== + +.. _array: ref-api/flwr.common.Array.html + +.. _clientapp: ref-api/flwr.client.ClientApp.html + +.. _configsrecord: ref-api/flwr.common.ConfigsRecord.html + +.. _context: ref-api/flwr.common.Context.html + +.. _metricsrecord: ref-api/flwr.common.MetricsRecord.html + +.. _numpyclient: ref-api/flwr.client.NumPyClient.html + +.. _parametersrecord: ref-api/flwr.common.ParametersRecord.html + +.. _recordset: ref-api/flwr.common.RecordSet.html#recordset + +By design, ClientApp_ objects are stateless. 
This means that the ``ClientApp`` object is +recreated each time a new ``Message`` is to be processed. This behaviour is identical +with Flower's Simulation Engine and Deployment Engine. For the former, it allows us to +simulate the running of a large number of nodes on a single machine or across multiple +machines. For the latter, it enables each ``SuperNode`` to be part of multiple runs, +each running a different ``ClientApp``. + +When a ``ClientApp`` is executed it receives a Context_. This context is unique for each +``ClientApp``, meaning that subsequent executions of the same ``ClientApp`` from the +same node will receive the same ``Context`` object. In the ``Context``, the ``.state`` +attribute can be used to store information that you would like the ``ClientApp`` to have +access to for the duration of the run. This could be anything from intermediate results +such as the history of training losses (e.g. as a list of `float` values with a new +entry appended each time the ``ClientApp`` is executed), certain parts of the model that +should persist at the client side, or some other arbitrary Python objects. These items +would need to be serialized before saving them into the context. + +Saving metrics to the context +----------------------------- + +This section will demonstrate how to save metrics such as accuracy/loss values to the +Context_ so they can be used in subsequent executions of the ``ClientApp``. If your +``ClientApp`` makes use of NumPyClient_ then entire object is also re-created for each +call to methods like ``fit()`` or ``evaluate()``. + +Let's begin with a simple setting in which ``ClientApp`` is defined as follows. The +``evaluate()`` method only generates a random number and prints it. + +.. tip:: + + You can create a PyTorch project with ready-to-use ``ClientApp`` and other + components by running ``flwr new``. + +.. 
code-block:: python + + import random + from flwr.common import Context, ConfigsRecord + from flwr.client import ClientApp, NumPyClient + + + class SimpleClient(NumPyClient): + + def __init__(self): + self.n_val = [] + + def evaluate(self, parameters, config): + n = random.randint(0, 10) # Generate a random integer between 0 and 10 + self.n_val.append(n) + # Even though in this line `n_val` has the value returned in the line + # above, self.n_val will be re-initialized to an empty list the next time + # this `ClientApp` runs + return float(0.0), 1, {} + + + def client_fn(context: Context): + return SimpleClient().to_client() + + + # Finally, construct the clinetapp instance by means of the `client_fn` callback + app = ClientApp(client_fn=client_fn) + +Let's say we want to save that randomly generated integer and append it to a list that +persists in the context. To do that, you'll need to do two key things: + +1. Make the ``context.state`` reachable withing your client class +2. Initialise the appropiate record type (in this example we use ConfigsRecord_) and + save/read your entry when required. + +.. 
code-block:: python + + def SimpleClient(NumPyClient): + + def __init__(self, context: Context): + self.client_state = ( + context.state + ) # add a reference to the state of your ClientApp + if "eval_metrics" not in self.client_state.configs_records: + self.client_state.configs_records["eval_metrics"] = ConfigsRecord() + + # Print content of the state + # You'll see it persists previous entries of `n_val` + print(self.client_state.configs_records) + + def evaluate(self, parameters, config): + n = random.randint(0, 10) # Generate a random integer between 0 and 10 + # Add results into a `ConfigsRecord` object under the "n_val" key + # Note a `ConfigsRecord` is a special type of python Dictionary + eval_metrics = self.client_state.configs_records["eval_metrics"] + if "n_val" not in eval_metrics: + eval_metrics["n_val"] = [n] + else: + eval_metrics["n_val"].append(n) + + return float(0.0), 1, {} + + + def client_fn(context: Context): + return SimpleClient(context).to_client() # Note we pass the context + + + # Finally, construct the clinetapp instance by means of the `client_fn` callback + app = ClientApp(client_fn=client_fn) + +If you run the app, you'll see an output similar to the one below. See how after each +round the `n_val` entry in the context gets one additional integer ? Note that the order +in which the `ClientApp` logs these messages might differ slightly between rounds. + +.. 
code-block:: shell + + # round 1 (.evaluate() hasn't been executed yet, so that's why it's empty) + configs_records={'eval_metrics': {}} + configs_records={'eval_metrics': {}} + + # round 2 (note `eval_metrics` has results added in round 1) + configs_records={'eval_metrics': {'n_val': [2]}} + configs_records={'eval_metrics': {'n_val': [8]}} + + # round 3 (note `eval_metrics` has results added in round 1&2) + configs_records={'eval_metrics': {'n_val': [8, 2]}} + configs_records={'eval_metrics': {'n_val': [2, 9]}} + + # round 4 (note `eval_metrics` has results added in round 1&2&3) + configs_records={'eval_metrics': {'n_val': [2, 9, 4]}} + configs_records={'eval_metrics': {'n_val': [8, 2, 5]}} + +Saving model parameters to the context +-------------------------------------- + +Using ConfigsRecord_ or MetricsRecord_ to save "simple" components is fine (e.g., float, +integer, boolean, string, bytes, and lists of these types. Note that MetricsRecord_ only +supports float, integer, and lists of these types) Flower has a specific type of record, +a ParametersRecord_, for storing model parameters or more generally data arrays. + +Let's see a couple of examples of how to save NumPy arrays first and then how to save +parameters of PyTorch and TensorFlow models. + +.. note:: + + The examples below omit the definition of a ``ClientApp`` to keep the code blocks + concise. To make use of ``ParametersRecord`` objects in your ``ClientApp`` you can + follow the same principles as outlined earlier. + +Saving NumPy arrays to the context +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Elements stored in a `ParametersRecord` are of type Array_, which is a data structure +that holds ``bytes`` and metadata that can be used for deserialization. Let's see how to +create an ``Array`` from a NumPy array and insert it into a ``ParametersRecord``. 
Here +we will make use of the built-in serialization and deserialization mechanisms in Flower, +namely the ``flwr.common.array_from_numpy`` function and the `numpy()` method of an +Array_ object. + +.. note:: + + Array_ objects carry bytes as their main payload and additional metadata to use for + deserialization. You can implement your own serialization/deserialization if the + provided ``array_from_numpy`` doesn't fit your use case. + +Let's see how to use those functions to store a NumPy array into the context. + +.. code-block:: python + + import numpy as np + from flwr.common import Context, ParametersRecord, array_from_numpy + + + # Let's create a simple NumPy array + arr_np = np.random.randn(3, 3) + + # If we print it + # array([[-1.84242409, -1.01539537, -0.46528405], + # [ 0.32991896, 0.55540414, 0.44085534], + # [-0.10758364, 1.97619858, -0.37120501]]) + + # Now, let's serialize it and construct an Array + arr = array_from_numpy(arr_np) + + # If we print it (note the binary data) + # Array(dtype='float64', shape=[3, 3], stype='numpy.ndarray', data=b'\x93NUMPY\x01\x00v\x00...) + + # It can be inserted in a ParametersRecord like this + p_record = ParametersRecord({"my_array": arr}) + + # Then, it can be added to the state in the context + context.state.parameters_records["some_parameters"] = p_record + +To extract the data in a ``ParametersRecord``, you just need to deserialize the array of +interest. For example, following the example above: + +.. 
code-block:: python + + # Get Array from context + arr = context.state.parameters_records["some_parameters"]["my_array"] + + # Deserialize it + arr_deserialized = arr.numpy() + + # If we print it (it should show the exact same values as earlier) + # array([[-1.84242409, -1.01539537, -0.46528405], + # [ 0.32991896, 0.55540414, 0.44085534], + # [-0.10758364, 1.97619858, -0.37120501]]) + +Saving PyTorch parameters to the context +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Following the NumPy example above, to save parameters of a PyTorch model a +straightforward way of doing so is to transform the parameters into their NumPy +representation and then proceed as shown earlier. Below is a simple self-contained +example for how to do this. + +.. code-block:: python + + import torch + import torch.nn as nn + import torch.nn.functional as F + from flwr.common import Array, ParametersRecord, array_from_numpy + + + class Net(nn.Module): + """A very simple model""" + + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, 32, 5) + self.fc = nn.Linear(1024, 10) + + def forward(self, x): + x = F.relu(self.conv(x)) + return self.fc(x) + + + # Instantiate model as usual + model = Net() + + # Save all elements of the state_dict into a single RecordSet + p_record = ParametersRecord() + for k, v in model.state_dict().items(): + # Convert to NumPy, then to Array. Add to record + p_record[k] = array_from_numpy(v.detach().cpu().numpy()) + + # Add to a context + context.state.parameters_records["net_parameters"] = p_record + +Let say now you want to apply the parameters stored in your context to a new instance of +the model (as it happens each time a ``ClientApp`` is executed). You will need to: + +1. Deserialize each element in your specific ``ParametersRecord`` +2. Construct a ``state_dict`` and load it + +.. 
code-block:: python + + state_dict = {} + # Extract record from context + p_record = context.state.parameters_records["net_parameters"] + + # Deserialize arrays + for k, v in p_record.items(): + state_dict[k] = torch.from_numpy(v.numpy()) + + # Apply state dict to a new model instance + model_ = Net() + model_.load_state_dict(state_dict) + # now this model has the exact same parameters as the one created earlier + # You can verify this by doing + for p, p_ in zip(model.state_dict().values(), model_.state_dict().values()): + assert torch.allclose(p, p_), "`state_dict`s do not match" + +And that's it! Recall that even though this example shows how to store the entire +``state_dict`` in a ``ParametersRecord``, you can just save part of it. The process +would be identical, but you might need to adjust how it is loaded into an existing model +using PyTorch APIs. + +Saving Tensorflow/Keras parameters to the context +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Follow the same steps as done above but replace the ``state_dict`` logic with simply +`get_weights() `_ +to convert the model parameters to a list of NumPy arrays that can then be serialized +into an ``Array``. Then, after deserialization, use `set_weights() +`_ to apply the +new parameters to a model. diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst deleted file mode 100644 index cd8590bc3436..000000000000 --- a/doc/source/how-to-enable-ssl-connections.rst +++ /dev/null @@ -1,84 +0,0 @@ -Enable SSL connections -====================== - -This guide describes how to a SSL-enabled secure Flower server (``SuperLink``) can be -started and how a Flower client (``SuperNode``) can establish a secure connections to -it. - -A complete code example demonstrating a secure connection can be found `here -`_. - -The code example comes with a ``README.md`` file which explains how to start it. 
-Although it is already SSL-enabled, it might be less descriptive on how it does so. -Stick to this guide for a deeper introduction to the topic. - -Certificates ------------- - -Using SSL-enabled connections requires certificates to be passed to the server and -client. For the purpose of this guide we are going to generate self-signed certificates. -As this can become quite complex we are going to ask you to run the script in -``examples/advanced-tensorflow/certificates/generate.sh`` with the following command -sequence: - -.. code-block:: bash - - cd examples/advanced-tensorflow/certificates - ./generate.sh - -This will generate the certificates in -``examples/advanced-tensorflow/.cache/certificates``. - -The approach for generating SSL certificates in the context of this example can serve as -an inspiration and starting point, but it should not be used as a reference for -production environments. Please refer to other sources regarding the issue of correctly -generating certificates for production environments. For non-critical prototyping or -research projects, it might be sufficient to use the self-signed certificates generated -using the scripts mentioned in this guide. - -Server (SuperLink) ------------------- - -Use the following terminal command to start a sever (SuperLink) that uses the previously -generated certificates: - -.. code-block:: bash - - flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem - --ssl-keyfile certificates/server.key - -When providing certificates, the server expects a tuple of three certificates paths: CA -certificate, server certificate and server private key. - -Client (SuperNode) ------------------- - -Use the following terminal command to start a client (SuperNode) that uses the -previously generated certificates: - -.. 
code-block:: bash - - flower-supernode - --root-certificates certificates/ca.crt - --superlink 127.0.0.1:9092 - -When setting ``root_certificates``, the client expects a file path to PEM-encoded root -certificates. - -Conclusion ----------- - -You should now have learned how to generate self-signed certificates using the given -script, start an SSL-enabled server and have a client establish a secure connection to -it. - -Additional resources --------------------- - -These additional sources might be relevant if you would like to dive deeper into the -topic of certificates: - -- `Let's Encrypt `_ -- `certbot `_ diff --git a/doc/source/how-to-enable-tls-connections.rst b/doc/source/how-to-enable-tls-connections.rst new file mode 100644 index 000000000000..ad8c1b4e7e33 --- /dev/null +++ b/doc/source/how-to-enable-tls-connections.rst @@ -0,0 +1,127 @@ +Enable TLS connections +====================== + +This guide describes how to a TLS-enabled secure Flower server (``SuperLink``) can be +started and how a Flower client (``SuperNode``) can establish a secure connections to +it. + +A complete code example demonstrating a secure connection can be found `here +`_. + +The code example comes with a ``README.md`` file which explains how to start it. +Although it is already TLS-enabled, it might be less descriptive on how it does so. +Stick to this guide for a deeper introduction to the topic. + +Certificates +------------ + +Using TLS-enabled connections requires certificates to be passed to the server and +client. For the purpose of this guide we are going to generate self-signed certificates. +As this can become quite complex we are going to ask you to run the script in +``examples/advanced-tensorflow/certificates/generate.sh`` with the following command +sequence: + +.. code-block:: bash + + $ cd examples/advanced-tensorflow/certificates && \ + ./generate.sh + +This will generate the certificates in +``examples/advanced-tensorflow/.cache/certificates``. 
+ +The approach for generating TLS certificates in the context of this example can serve as +an inspiration and starting point, but it should not be used as a reference for +production environments. Please refer to other sources regarding the issue of correctly +generating certificates for production environments. For non-critical prototyping or +research projects, it might be sufficient to use the self-signed certificates generated +using the scripts mentioned in this guide. + +Server (SuperLink) +------------------ + +Navigate to the ``examples/advanced-tensorflow`` folder (`here +`_) and use the +following terminal command to start a server (SuperLink) that uses the previously +generated certificates: + +.. code-block:: bash + + $ flower-superlink \ + --ssl-ca-certfile .cache/certificates/ca.crt \ + --ssl-certfile .cache/certificates/server.pem \ + --ssl-keyfile .cache/certificates/server.key + +When providing certificates, the server expects a tuple of three certificates paths: CA +certificate, server certificate and server private key. + +Clients (SuperNode) +------------------- + +Use the following terminal command to start a client (SuperNode) that uses the +previously generated certificates: + +.. code-block:: bash + + $ flower-supernode \ + --root-certificates .cache/certificates/ca.crt \ + --superlink 127.0.0.1:9092 \ + --clientappio-api-address 0.0.0.0:9095 \ + --node-config="partition-id=0 num-partitions=10" + +When setting ``root_certificates``, the client expects a file path to PEM-encoded root +certificates. + +In another terminal, start a second SuperNode that uses the same certificates: + +.. 
code-block:: bash + + $ flower-supernode \ + --root-certificates .cache/certificates/ca.crt \ + --superlink 127.0.0.1:9092 \ + --clientappio-api-address 0.0.0.0:9096 \ + --node-config="partition-id=1 num-partitions=10" + +Note that in the second SuperNode, if you run both on the same machine, you must specify +a different port for the ``ClientAppIO`` API address to avoid clashing with the first +SuperNode. + +Executing ``flwr run`` with TLS +------------------------------- + +The root certificates used for executing ``flwr run`` is specified in the +``pyproject.toml`` of your app. + +.. code-block:: toml + + [tool.flwr.federations.local-deployment] + address = "127.0.0.1:9093" + root-certificates = "./.cache/certificates/ca.crt" + +Note that the path to the ``root-certificates`` is relative to the root of the project. +Now, you can run the example by executing the following: + +.. code-block:: bash + + $ flwr run . local-deployment --stream + +Conclusion +---------- + +You should now have learned how to generate self-signed certificates using the given +script, start an TLS-enabled server and have two clients establish secure connections to +it. You should also have learned how to run your Flower project using ``flwr run`` with +TLS enabled. + +.. note:: + + For running a Docker setup with TLS enabled, please refer to + :doc:`docker/enable-tls`. 
+ +Additional resources +-------------------- + +These additional sources might be relevant if you would like to dive deeper into the +topic of certificates: + +- `Let's Encrypt `_ +- `certbot `_ diff --git a/doc/source/how-to-implement-fedbn.rst b/doc/source/how-to-implement-fedbn.rst new file mode 100644 index 000000000000..1e3bd8e52ee5 --- /dev/null +++ b/doc/source/how-to-implement-fedbn.rst @@ -0,0 +1,103 @@ +Implement FedBN +=============== + +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload with `FedBN `_, a +federated training method designed for non-IID data. We are using PyTorch to train a +Convolutional Neural Network (with Batch Normalization layers) on the CIFAR-10 dataset. +When applying FedBN, only minor changes are needed compared to :doc:`Quickstart PyTorch +`. + +Model +----- + +A full introduction to federated learning with PyTorch and Flower can be found in +:doc:`Quickstart PyTorch `. This how-to guide varies only a +few details in ``task.py``. FedBN requires a model architecture (defined in class +``Net()``) that uses Batch Normalization layers: + +.. code-block:: python + + class Net(nn.Module): + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.bn1 = nn.BatchNorm2d(6) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.bn2 = nn.BatchNorm2d(16) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.bn3 = nn.BatchNorm1d(120) + self.fc2 = nn.Linear(120, 84) + self.bn4 = nn.BatchNorm1d(84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: Tensor) -> Tensor: + x = self.pool(F.relu(self.bn1(self.conv1(x)))) + x = self.pool(F.relu(self.bn2(self.conv2(x)))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.bn3(self.fc1(x))) + x = F.relu(self.bn4(self.fc2(x))) + x = self.fc3(x) + return x + +Try editing the model architecture, then run the project to ensure everything still +works: + +.. 
code-block:: bash + + flwr run . + +So far this should all look fairly familiar if you've used Flower with PyTorch before. + +FedBN +----- + +To adopt FedBN, only the ``get_parameters`` and ``set_parameters`` functions in +``task.py`` need to be revised. FedBN only changes the client-side by excluding batch +normalization parameters from being exchanged with the server. + +We revise the *client* logic by changing ``get_parameters`` and ``set_parameters`` in +``task.py``. The batch normalization parameters are excluded from model parameter list +when sending to or receiving from the server: + +.. code-block:: python + + class FlowerClient(NumPyClient): + """Flower client for CIFAR-10 image classification using PyTorch.""" + + # ... [other FlowerClient methods] + + def get_parameters(self, config) -> List[np.ndarray]: + # Return model parameters as a list of NumPy ndarrays + # Exclude parameters of BN layers when using FedBN + return [ + val.cpu().numpy() + for name, val in self.model.state_dict().items() + if "bn" not in name + ] + + def set_parameters(self, parameters: List[np.ndarray]) -> None: + # Set model parameters from a list of NumPy ndarrays + keys = [k for k in self.model.state_dict().keys() if "bn" not in k] + params_dict = zip(keys, parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + self.model.load_state_dict(state_dict, strict=False) + + ... + +To test the new approach, run the project again: + +.. code-block:: bash + + flwr run . + +Your PyTorch project now runs federated learning with FedBN. Congratulations! + +Next Steps +---------- + +The example is of course over-simplified since all clients load the exact same dataset. +This isn't realistic. You now have the tools to explore this topic further. How about +using different subsets of CIFAR-10 on each client? How about adding more clients? 
diff --git a/doc/source/how-to-monitor-simulation.rst b/doc/source/how-to-monitor-simulation.rst deleted file mode 100644 index f540e22a6a77..000000000000 --- a/doc/source/how-to-monitor-simulation.rst +++ /dev/null @@ -1,261 +0,0 @@ -Monitor simulation -================== - -Flower allows you to monitor system resources while running your simulation. Moreover, -the Flower simulation engine is powerful and enables you to decide how to allocate -resources per client manner and constrain the total usage. Insights from resource -consumption can help you make smarter decisions and speed up the execution time. - -The specific instructions assume you are using macOS and have the `Homebrew -`_ package manager installed. - -Downloads ---------- - -.. code-block:: bash - - brew install prometheus grafana - -`Prometheus `_ is used for data collection, while `Grafana -`_ will enable you to visualize the collected data. They are both -well integrated with `Ray `_ which Flower uses under the hood. - -Overwrite the configuration files (depending on your device, it might be installed on a -different path). - -If you are on an M1 Mac, it should be: - -.. code-block:: bash - - /opt/homebrew/etc/prometheus.yml - /opt/homebrew/etc/grafana/grafana.ini - -On the previous generation Intel Mac devices, it should be: - -.. code-block:: bash - - /usr/local/etc/prometheus.yml - /usr/local/etc/grafana/grafana.ini - -Open the respective configuration files and change them. Depending on your device, use -one of the two following commands: - -.. code-block:: bash - - # M1 macOS - open /opt/homebrew/etc/prometheus.yml - - # Intel macOS - open /usr/local/etc/prometheus.yml - -and then delete all the text in the file and paste a new Prometheus config you see -below. You may adjust the time intervals to your requirements: - -.. 
code-block:: bash - - global: - scrape_interval: 1s - evaluation_interval: 1s - - scrape_configs: - # Scrape from each ray node as defined in the service_discovery.json provided by ray. - - job_name: 'ray' - file_sd_configs: - - files: - - '/tmp/ray/prom_metrics_service_discovery.json' - -Now after you have edited the Prometheus configuration, do the same with the Grafana -configuration files. Open those using one of the following commands as before: - -.. code-block:: python - - # M1 macOS - open / opt / homebrew / etc / grafana / grafana.ini - - # Intel macOS - open / usr / local / etc / grafana / grafana.ini - -Your terminal editor should open and allow you to apply the following configuration as -before. - -.. code-block:: bash - - [security] - allow_embedding = true - - [auth.anonymous] - enabled = true - org_name = Main Org. - org_role = Viewer - - [paths] - provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning - -Congratulations, you just downloaded all the necessary software needed for metrics -tracking. Now, let’s start it. - -Tracking metrics ----------------- - -Before running your Flower simulation, you have to start the monitoring tools you have -just installed and configured. - -.. code-block:: bash - - brew services start prometheus - brew services start grafana - -Please include the following argument in your Python code when starting a simulation. - -.. code-block:: python - - fl.simulation.start_simulation( - # ... - # all the args you used before - # ... - ray_init_args={"include_dashboard": True} - ) - -Now, you are ready to start your workload. - -Shortly after the simulation starts, you should see the following logs in your terminal: - -.. code-block:: bash - - 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 - -You can look at everything at http://127.0.0.1:8265 . - -It's a Ray Dashboard. 
You can navigate to Metrics (on the left panel, the lowest -option). - -Or alternatively, you can just see them in Grafana by clicking on the right-up corner, -“View in Grafana”. Please note that the Ray dashboard is only accessible during the -simulation. After the simulation ends, you can only use Grafana to explore the metrics. -You can start Grafana by going to ``http://localhost:3000/``. - -After you finish the visualization, stop Prometheus and Grafana. This is important as -they will otherwise block, for example port ``3000`` on your machine as long as they are -running. - -.. code-block:: bash - - brew services stop prometheus - brew services stop grafana - -Resource allocation -------------------- - -You must understand how the Ray library works to efficiently allocate system resources -to simulation clients on your own. - -Initially, the simulation (which Ray handles under the hood) starts by default with all -the available resources on the system, which it shares among the clients. It doesn't -mean it divides it equally among all of them, nor that the model training happens at all -of them simultaneously. You will learn more about that in the later part of this blog. -You can check the system resources by running the following: - -.. code-block:: python - - import ray - - ray.available_resources() - -In Google Colab, the result you see might be similar to this: - -.. code-block:: bash - - {'memory': 8020104807.0, - 'GPU': 1.0, - 'object_store_memory': 4010052403.0, - 'CPU': 2.0, - 'accelerator_type:T4': 1.0, - 'node:172.28.0.2': 1.0} - -However, you can overwrite the defaults. When starting a simulation, do the following -(you don't need to overwrite all of them): - -.. code-block:: python - - num_cpus = 2 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... 
- ray_init_args={ - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - } - ) - -Let’s also specify the resource for a single client. - -.. code-block:: python - - # Total resources for simulation - num_cpus = 4 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - - # Single client resources - client_num_cpus = 2 - client_num_gpus = 1 - - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args={ - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - }, - # The argument below is new - client_resources={ - "num_cpus": client_num_cpus, - "num_gpus": client_num_gpus, - }, - ) - -Now comes the crucial part. Ray will start a new client only when it has all the -required resources (such that they run in parallel) when the resources allow. - -In the example above, only one client will be run, so your clients won't run -concurrently. Setting ``client_num_gpus = 0.5`` would allow running two clients and -therefore enable them to run concurrently. Be careful not to require more resources than -available. If you specified ``client_num_gpus = 2``, the simulation wouldn't start (even -if you had 2 GPUs but decided to set 1 in ``ray_init_args``). - -FAQ ---- - -Q: I don't see any metrics logged. - -A: The timeframe might not be properly set. The setting is in the top right corner -("Last 30 minutes" by default). Please change the timeframe to reflect the period when -the simulation was running. - -Q: I see “Grafana server not detected. Please make sure the Grafana server is running -and refresh this page” after going to the Metrics tab in Ray Dashboard. - -A: You probably don't have Grafana running. Please check the running services - -.. code-block:: bash - - brew services list - -Q: I see "This site can't be reached" when going to http://127.0.0.1:8265. 
- -A: Either the simulation has already finished, or you still need to start Prometheus. - -Resources ---------- - -Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-started.html - -Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html diff --git a/doc/source/how-to-run-simulations.rst b/doc/source/how-to-run-simulations.rst index fb4eed17b4e7..6619f2054f57 100644 --- a/doc/source/how-to-run-simulations.rst +++ b/doc/source/how-to-run-simulations.rst @@ -1,291 +1,368 @@ +.. |clientapp_link| replace:: ``ClientApp`` + +.. |message_link| replace:: ``Message`` + +.. |context_link| replace:: ``Context`` + +.. |flwr_run_link| replace:: ``flwr run`` + +.. |flwr_new_link| replace:: ``flwr new`` + +.. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _context_link: ref-api/flwr.common.Context.html + +.. _flwr_new_link: ref-api-cli.html#flwr-new + +.. _flwr_run_link: ref-api-cli.html#flwr-run + +.. _message_link: ref-api/flwr.common.Message.html + Run simulations =============== -.. youtube:: cRebUIGB5RU - :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB - :width: 100% - -Simulating Federated Learning workloads is useful for a multitude of use-cases: you -might want to run your workload on a large cohort of clients but without having to -source, configure and mange a large number of physical devices; you might want to run -your FL workloads as fast as possible on the compute systems you have access to without -having to go through a complex setup process; you might want to validate your algorithm -on different scenarios at varying levels of data and system heterogeneity, client -availability, privacy budgets, etc. These are among some of the use-cases where -simulating FL workloads makes sense. Flower can accommodate these scenarios by means of -its `VirtualClientEngine -`_ or VCE. - -The ``VirtualClientEngine`` schedules, launches and manages `virtual` clients. These -clients are identical to `non-virtual` clients (i.e. 
the ones you launch via the command -`flwr.client.start_client `_) in the sense that they can -be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient -`_ and therefore behave in an identical way. -In addition to that, clients managed by the ``VirtualClientEngine`` are: - -- resource-aware: this means that each client gets assigned a portion of the compute and - memory on your system. You as a user can control this at the beginning of the - simulation and allows you to control the degree of parallelism of your Flower FL - simulation. The fewer the resources per client, the more clients can run concurrently - on the same hardware. -- self-managed: this means that you as a user do not need to launch clients manually, - instead this gets delegated to ``VirtualClientEngine``'s internals. -- ephemeral: this means that a client is only materialized when it is required in the FL - process (e.g. to do `fit() `_). The object - is destroyed afterwards, releasing the resources it was assigned and allowing in this - way other clients to participate. - -The ``VirtualClientEngine`` implements `virtual` clients using `Ray +Simulating Federated Learning workloads is useful for a multitude of use cases: you +might want to run your workload on a large cohort of clients without having to source, +configure, and manage a large number of physical devices; you might want to run your FL +workloads as fast as possible on the compute systems you have access to without going +through a complex setup process; you might want to validate your algorithm in different +scenarios at varying levels of data and system heterogeneity, client availability, +privacy budgets, etc. These are among some of the use cases where simulating FL +workloads makes sense. + +Flower's ``Simulation Engine`` schedules, launches, and manages |clientapp_link|_ +instances. 
It does so through a ``Backend``, which contains several workers (i.e., +Python processes) that can execute a ``ClientApp`` by passing it a |context_link|_ and a +|message_link|_. These ``ClientApp`` objects are identical to those used by Flower's +`Deployment Engine `_, making alternating +between *simulation* and *deployment* an effortless process. The execution of +``ClientApp`` objects through Flower's ``Simulation Engine`` is: + +- **Resource-aware**: Each backend worker executing ``ClientApp``\s gets assigned a + portion of the compute and memory on your system. You can define these at the + beginning of the simulation, allowing you to control the degree of parallelism of your + simulation. For a fixed total pool of resources, the fewer the resources per backend + worker, the more ``ClientApps`` can run concurrently on the same hardware. +- **Batchable**: When there are more ``ClientApps`` to execute than backend workers, + ``ClientApps`` are queued and executed as soon as resources are freed. This means that + ``ClientApps`` are typically executed in batches of N, where N is the number of + backend workers. +- **Self-managed**: This means that you, as a user, do not need to launch ``ClientApps`` + manually; instead, the ``Simulation Engine``'s internals orchestrates the execution of + all ``ClientApp``\s. +- **Ephemeral**: This means that a ``ClientApp`` is only materialized when it is + required by the application (e.g., to do `fit() + `_). The object is destroyed afterward, + releasing the resources it was assigned and allowing other clients to participate. + +.. note:: + + You can preserve the state (e.g., internal variables, parts of an ML model, + intermediate results) of a ``ClientApp`` by saving it to its ``Context``. Check the + `Designing Stateful Clients `_ guide for a + complete walkthrough. + +The ``Simulation Engine`` delegates to a ``Backend`` the role of spawning and managing +``ClientApps``. 
The default backend is the ``RayBackend``, which uses `Ray `_, an open-source framework for scalable Python workloads. In -particular, Flower's ``VirtualClientEngine`` makes use of `Actors -`_ to spawn `virtual` clients and -run their workload. +particular, each worker is an `Actor +`_ capable of spawning a +``ClientApp`` given its ``Context`` and a ``Message`` to process. Launch your Flower simulation ----------------------------- -Running Flower simulations still require you to define your client class, a strategy, -and utility functions to download and load (and potentially partition) your dataset. -With that out of the way, launching your simulation is done with `start_simulation -`_ and a minimal example looks as -follows: +Running a simulation is straightforward; in fact, it is the default mode of operation +for |flwr_run_link|_. Therefore, running Flower simulations primarily requires you to +first define a ``ClientApp`` and a ``ServerApp``. A convenient way to generate a minimal +but fully functional Flower app is by means of the |flwr_new_link|_ command. There are +multiple templates to choose from. The example below uses the ``PyTorch`` template. -.. code-block:: python +.. tip:: - import flwr as fl - from flwr.server.strategy import FedAvg + If you haven't already, install Flower via ``pip install -U flwr`` in a Python + environment. +.. code-block:: shell - def client_fn(cid: str): - # Return a standard Flower client - return MyFlowerClient().to_client() + # or simply execute `flwr run` for a fully interactive process + flwr new my-app --framework="PyTorch" --username="alice" +Then, follow the instructions shown after completing the |flwr_new_link|_ command. When +you execute |flwr_run_link|_, you'll be using the ``Simulation Engine``. 
- # Launch the simulation - hist = fl.simulation.start_simulation( - client_fn=client_fn, # A function to run a _virtual_ client when required - num_clients=50, # Total number of clients available - config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds - strategy=FedAvg(), # A Flower strategy - ) +If we take a look at the ``pyproject.toml`` that was generated from the |flwr_new_link|_ +command (and loaded upon |flwr_run_link|_ execution), we see that a *default* federation +is defined. It sets the number of supernodes to 10. -VirtualClientEngine resources -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code-block:: toml -By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) -since that is also the default behavior when starting Ray. However, in some settings you -might want to limit how many of your system resources are used for simulation. You can -do this via the ``ray_init_args`` input argument to ``start_simulation`` which the VCE -internally passes to Ray's ``ray.init`` command. For a complete list of settings you can -configure check the `ray.init -`_ documentation. -Do not set ``ray_init_args`` if you want the VCE to use all your system's CPUs and GPUs. + [tool.flwr.federations] + default = "local-simulation" -.. code-block:: python + [tool.flwr.federations.local-simulation] + options.num-supernodes = 10 - import flwr as fl +You can modify the size of your simulations by adjusting ``options.num-supernodes``. - # Launch the simulation by limiting resources visible to Flower's VCE - hist = fl.simulation.start_simulation( - # ... - # Out of all CPUs and GPUs available in your system, - # only 8xCPUs and 1xGPUs would be used for simulation. 
- ray_init_args={"num_cpus": 8, "num_gpus": 1} - ) +Simulation examples +~~~~~~~~~~~~~~~~~~~ + +In addition to the quickstart tutorials in the documentation (e.g., `quickstart PyTorch +Tutorial `_, `quickstart JAX Tutorial +`_), most examples in the Flower repository are +simulation-ready. + +- `Quickstart TensorFlow/Keras + `_. +- `Quickstart PyTorch + `_ +- `Advanced PyTorch + `_ +- `Quickstart MLX `_ +- `ViT fine-tuning `_ + +The complete list of examples can be found in `the Flower GitHub +`_. -Assigning client resources -~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. _clientappresources: -By default the ``VirtualClientEngine`` assigns a single CPU core (and nothing else) to -each virtual client. This means that if your system has 10 cores, that many virtual -clients can be concurrently running. +Defining ``ClientApp`` resources +-------------------------------- -More often than not, you would probably like to adjust the resources your clients get -assigned based on the complexity (i.e. compute and memory footprint) of your FL -workload. You can do so when starting your simulation by setting the argument -`client_resources` to `start_simulation -`_. Two keys are internally used by -Ray to schedule and spawn workloads (in our case Flower clients): +By default, the ``Simulation Engine`` assigns two CPU cores to each backend worker. This +means that if your system has 10 CPU cores, five backend workers can be running in +parallel, each executing a different ``ClientApp`` instance. -- ``num_cpus`` indicates the number of CPU cores a client would get. -- ``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned. +More often than not, you would probably like to adjust the resources your ``ClientApp`` +gets assigned based on the complexity (i.e., compute and memory footprint) of your +workload. You can do so by adjusting the backend resources for your federation. -Let's see a few examples: +.. 
caution:: + + Note that the resources the backend assigns to each worker (and hence to each + ``ClientApp`` being executed) are assigned in a *soft* manner. This means that the + resources are primarily taken into account in order to control the degree of + parallelism at which ``ClientApp`` instances should be executed. Resource assignment + is **not strict**, meaning that if you specified your ``ClientApp`` is assumed to + make use of 25% of the available VRAM but it ends up using 50%, it might cause other + ``ClientApp`` instances to crash throwing an out-of-memory (OOM) error. + +Customizing resources can be done directly in the ``pyproject.toml`` of your app. + +.. code-block:: toml + + [tool.flwr.federations.local-simulation] + options.num-supernodes = 10 + options.backend.client-resources.num-cpus = 1 # each ClientApp assumes to use 1 CPU (default is 2) + options.backend.client-resources.num-gpus = 0.0 # no GPU access to the ClientApp (default is 0.0) + +With the above backend settings, your simulation will run as many ``ClientApps`` in +parallel as CPUs you have in your system. GPU resources for your ``ClientApp`` can be +assigned by specifying the **ratio** of VRAM each should make use of. + +.. code-block:: toml + + [tool.flwr.federations.local-simulation] + options.num-supernodes = 10 + options.backend.client-resources.num-cpus = 1 # each ClientApp assumes to use 1 CPU (default is 2) + options.backend.client-resources.num-gpus = 0.25 # each ClientApp uses 25% of VRAM (default is 0.0) + +.. note:: + + If you are using TensorFlow, you need to `enable memory growth + `_ so multiple + ``ClientApp`` instances can share a GPU. This needs to be done before launching the + simulation. To do so, set the environment variable + ``TF_FORCE_GPU_ALLOW_GROWTH="1"``. + +Let's see how the above configuration results in a different number of ``ClientApps`` +running in parallel depending on the resources available in your system. 
If your system +has: + +- 10x CPUs and 1x GPU: at most 4 ``ClientApps`` will run in parallel since each requires + 25% of the available VRAM. +- 10x CPUs and 2x GPUs: at most 8 ``ClientApps`` will run in parallel (VRAM-limited). +- 6x CPUs and 4x GPUs: at most 6 ``ClientApps`` will run in parallel (CPU-limited). +- 10x CPUs but 0x GPUs: you won't be able to run the simulation since not even the + resources for a single ``ClientApp`` can be met. + +A generalization of this is given by the following equation. It gives the maximum number +of ``ClientApps`` that can be executed in parallel on available CPU cores (SYS_CPUS) and +VRAM (SYS_GPUS). + +.. math:: + + N = \min\left(\left\lfloor \frac{\text{SYS_CPUS}}{\text{num_cpus}} \right\rfloor, \left\lfloor \frac{\text{SYS_GPUS}}{\text{num_gpus}} \right\rfloor\right) + +Both ``num_cpus`` (an integer of 1 or higher) and ``num_gpus`` (a non-negative real +number) should be set on a per ``ClientApp`` basis. If, for example, you want only a +single ``ClientApp`` to run on each GPU, then set ``num_gpus=1.0``. If, for example, a +``ClientApp`` requires access to two whole GPUs, you'd set ``num_gpus=2``. + +While the ``options.backend.client-resources`` can be used to control the degree of +concurrency in your simulations, this does not stop you from running hundreds or even +thousands of clients in the same round and having orders of magnitude more *dormant* +(i.e., not participating in a round) clients. Let's say you want to have 100 clients per +round but your system can only accommodate 8 clients concurrently. The ``Simulation +Engine`` will schedule 100 ``ClientApps`` to run and then will execute them in a +resource-aware manner in batches of 8. + +Simulation Engine resources +--------------------------- + +By default, the ``Simulation Engine`` has **access to all system resources** (i.e., all +CPUs, all GPUs). However, in some settings, you might want to limit how many of your +system resources are used for simulation. 
You can do this in the ``pyproject.toml`` of +your app by setting the ``options.backend.init_args`` variable. + +.. code-block:: toml + + [tool.flwr.federations.local-simulation] + options.num-supernodes = 10 + options.backend.client-resources.num-cpus = 1 # Each ClientApp will get assigned 1 CPU core + options.backend.client-resources.num-gpus = 0.5 # Each ClientApp will get 50% of each available GPU + options.backend.init_args.num_cpus = 1 # Only expose 1 CPU to the simulation + options.backend.init_args.num_gpus = 1 # Expose a single GPU to the simulation + +With the above setup, the Backend will be initialized with a single CPU and GPU. +Therefore, even if more CPUs and GPUs are available in your system, they will not be +used for the simulation. The example above results in a single ``ClientApp`` running at +any given point. + +For a complete list of settings you can configure, check the `ray.init +`_ documentation. + +For the highest performance, do not set ``options.backend.init_args``. + +Simulation in Colab/Jupyter +--------------------------- + +The preferred way of running simulations should always be |flwr_run_link|_. However, the +core functionality of the ``Simulation Engine`` can be used from within a Google Colab +or Jupyter environment by means of `run_simulation +`_. .. code-block:: python - import flwr as fl + from flwr.simulation import run_simulation - # each client gets 1xCPU (this is the default if no resources are specified) - my_client_resources = {"num_cpus": 1, "num_gpus": 0.0} - # each client gets 2xCPUs and half a GPU. (with a single GPU, 2 clients run concurrently) - my_client_resources = {"num_cpus": 2, "num_gpus": 0.5} - # 10 client can run concurrently on a single GPU, but only if you have 20 CPU threads. 
- my_client_resources = {"num_cpus": 2, "num_gpus": 0.1} + # Construct the ClientApp passing the client generation function + client_app = ClientApp(client_fn=client_fn) - # Launch the simulation - hist = fl.simulation.start_simulation( - # ... - client_resources=my_client_resources # A Python dict specifying CPU/GPU resources + # Create your ServerApp passing the server generation function + server_app = ServerApp(server_fn=server_fn) + + run_simulation( + server_app=server_app, + client_app=client_app, + num_supernodes=10, # equivalent to setting `num-supernodes` in the pyproject.toml ) -While the ``client_resources`` can be used to control the degree of concurrency in your -FL simulation, this does not stop you from running dozens, hundreds or even thousands of -clients in the same round and having orders of magnitude more `dormant` (i.e. not -participating in a round) clients. Let's say you want to have 100 clients per round but -your system can only accommodate 8 clients concurrently. The ``VirtualClientEngine`` -will schedule 100 jobs to run (each simulating a client sampled by the strategy) and -then will execute them in a resource-aware manner in batches of 8. +With ``run_simulation``, you can also control the amount of resources for your +``ClientApp`` instances. Do so by setting ``backend_config``. If unset, the default +resources are assigned (i.e., 2xCPUs per ``ClientApp`` and no GPU). -To understand all the intricate details on how resources are used to schedule FL clients -and how to define custom resources, please take a look at the `Ray documentation -`_. +.. code-block:: python -Simulation examples -~~~~~~~~~~~~~~~~~~~ + run_simulation( + # ... + backend_config={"client_resources": {"num_cpus": 2, "num_gpus": 0.25}} + ) -A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and -PyTorch are provided in the `Flower repository `_. 
You -can run them on Google Colab too: +Refer to the `30 minutes Federated AI Tutorial +`_ +for a complete example on how to run Flower Simulations in Colab. -- `Tensorflow/Keras Simulation - `_: 100 - clients collaboratively train a MLP model on MNIST. -- `PyTorch Simulation - `_: 100 clients - collaboratively train a CNN model on MNIST. +.. _multinodesimulations: Multi-node Flower simulations ----------------------------- -Flower's ``VirtualClientEngine`` allows you to run FL simulations across multiple -compute nodes. Before starting your multi-node simulation ensure that you: - -1. Have the same Python environment in all nodes. -2. Have a copy of your code (e.g. your entire repo) in all nodes. -3. Have a copy of your dataset in all nodes (more about this in :ref:`simulation - considerations `) -4. Pass ``ray_init_args={"address"="auto"}`` to `start_simulation - `_ so the ``VirtualClientEngine`` - attaches to a running Ray instance. -5. Start Ray on you head node: on the terminal type ``ray start --head``. This command +Flower's ``Simulation Engine`` allows you to run FL simulations across multiple compute +nodes so that you're not restricted to running simulations on a *single* machine. Before +starting your multi-node simulation, ensure that you: + +1. Have the same Python environment on all nodes. +2. Have a copy of your code on all nodes. +3. Have a copy of your dataset on all nodes. If you are using partitions from `Flower + Datasets `_, ensure the partitioning strategy and its + parameterization are the same. The expectation is that the i-th dataset partition is + identical in all nodes. +4. Start Ray on your head node: on the terminal, type ``ray start --head``. This command will print a few lines, one of which indicates how to attach other nodes to the head node. -6. Attach other nodes to the head node: copy the command shown after starting the head - and execute it on terminal of a new node: for example ``ray start - --address='192.168.1.132:6379'`` +5. 
Attach other nodes to the head node: copy the command shown after starting the head + and execute it on the terminal of a new node (before executing |flwr_run_link|_). For + example: ``ray start --address='192.168.1.132:6379'``. Note that to be able to attach + nodes to the head node they should be discoverable by each other. With all the above done, you can run your code from the head node as you would if the -simulation was running on a single node. +simulation were running on a single node. In other words: + +.. code-block:: shell -Once your simulation is finished, if you'd like to dismantle your cluster you simply + # From your head node, launch the simulation + flwr run + +Once your simulation is finished, if you'd like to dismantle your cluster, you simply need to run the command ``ray stop`` in each node's terminal (including the head node). -Multi-node simulation good-to-know -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. note:: + + When attaching a new node to the head, all its resources (i.e., all CPUs, all GPUs) + will be visible by the head node. This means that the ``Simulation Engine`` can + schedule as many ``ClientApp`` instances as that node can possibly run. In some + settings, you might want to exclude certain resources from the simulation. You can + do this by appending ``--num-cpus=`` and/or + ``--num-gpus=`` in any ``ray start`` command (including when + starting the head). -Here we list a few interesting functionality when running multi-node FL simulations: +FAQ for Simulations +------------------- -User ``ray status`` to check all nodes connected to your head node as well as the total -resources available to the ``VirtualClientEngine``. +.. dropdown:: Can I make my ``ClientApp`` instances stateful? -When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will -be visible by the head node. This means that the ``VirtualClientEngine`` can schedule as -many `virtual` clients as that node can possible run. 
In some settings you might want to -exclude certain resources from the simulation. You can do this by appending -`--num-cpus=` and/or `--num-gpus=` in any ``ray -start`` command (including when starting the head) + Yes. Use the ``state`` attribute of the |context_link|_ object that is passed to the ``ClientApp`` to save variables, parameters, or results to it. Read the `Designing Stateful Clients `_ guide for a complete walkthrough. -.. _considerations-for-simulations: +.. dropdown:: Can I run multiple simulations on the same machine? -Considerations for simulations ------------------------------- + Yes, but bear in mind that each simulation isn't aware of the resource usage of the other. If your simulations make use of GPUs, consider setting the ``CUDA_VISIBLE_DEVICES`` environment variable to make each simulation use a different set of the available GPUs. Export such an environment variable before starting |flwr_run_link|_. -.. note:: +.. dropdown:: Do the CPU/GPU resources set for each ``ClientApp`` restrict how much compute/memory these make use of? - We are actively working on these fronts so to make it trivial to run any FL workload - with Flower simulation. - -The current VCE allows you to run Federated Learning workloads in simulation mode -whether you are prototyping simple scenarios on your personal laptop or you want to -train a complex FL pipeline across multiple high-performance GPU nodes. While we add -more capabilities to the VCE, the points below highlight some of the considerations to -keep in mind when designing your FL pipeline with Flower. We also highlight a couple of -current limitations in our implementation. - -GPU resources -~~~~~~~~~~~~~ - -The VCE assigns a share of GPU memory to a client that specifies the key ``num_gpus`` in -``client_resources``. This being said, Ray (used internally by the VCE) is by default: - -- not aware of the total VRAM available on the GPUs. 
This means that if you set - ``num_gpus=0.5`` and you have two GPUs in your system with different (e.g. 32GB and - 8GB) VRAM amounts, they both would run 2 clients concurrently. -- not aware of other unrelated (i.e. not created by the VCE) workloads are running on - the GPU. Two takeaways from this are: - - - Your Flower server might need a GPU to evaluate the `global model` after aggregation - (by instance when making use of the `evaluate method - `_) - - If you want to run several independent Flower simulations on the same machine you - need to mask-out your GPUs with ``CUDA_VISIBLE_DEVICES=""`` when launching - your experiment. - -In addition, the GPU resource limits passed to ``client_resources`` are not `enforced` -(i.e. they can be exceeded) which can result in the situation of client using more VRAM -than the ratio specified when starting the simulation. - -TensorFlow with GPUs -++++++++++++++++++++ - -When `using a GPU with TensorFlow `_ nearly your -entire GPU memory of all your GPUs visible to the process will be mapped. This is done -by TensorFlow for optimization purposes. However, in settings such as FL simulations -where we want to split the GPU into multiple `virtual` clients, this is not a desirable -mechanism. Luckily we can disable this default behavior by `enabling memory growth -`_. - -This would need to be done in the main process (which is where the server would run) and -in each Actor created by the VCE. By means of ``actor_kwargs`` we can pass the reserved -key `"on_actor_init_fn"` in order to specify a function to be executed upon actor -initialization. In this case, to enable GPU growth for TF workloads. It would look as -follows: + No. These resources are exclusively used by the simulation backend to control how many workers can be created on startup. Let's say N backend workers are launched, then at most N ``ClientApp`` instances will be running in parallel. 
It is your responsibility to ensure ``ClientApp`` instances have enough resources to execute their workload (e.g., fine-tune a transformer model). -.. code-block:: python +.. dropdown:: My ``ClientApp`` is triggering OOM on my GPU. What should I do? - import flwr as fl - from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth + It is likely that your ``num_gpus`` setting, which controls the number of ``ClientApp`` instances that can share a GPU, is too low (meaning too many ``ClientApps`` share the same GPU). Try the following: - # Enable GPU growth in the main thread (the one used by the - # server to quite likely run global evaluation using GPU) - enable_tf_gpu_growth() + 1. Set your ``num_gpus=1``. This will make a single ``ClientApp`` run on a GPU. + 2. Inspect how much VRAM is being used (use ``nvidia-smi`` for this). + 3. Based on the VRAM you see your single ``ClientApp`` using, calculate how many more would fit within the remaining VRAM. One divided by the total number of ``ClientApps`` is the ``num_gpus`` value you should set. - # Start Flower simulation - hist = fl.simulation.start_simulation( - # ... - actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. - }, - ) + Refer to :ref:`clientappresources` for more details. + + If your ``ClientApp`` is using TensorFlow, make sure you are exporting ``TF_FORCE_GPU_ALLOW_GROWTH="1"`` before starting your simulation. For more details, check the TensorFlow guide on `limiting GPU memory growth <https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth>`_. + +.. dropdown:: How do I know what's the right ``num_cpus`` and ``num_gpus`` for my ``ClientApp``? + + A good practice is to start by running the simulation for a few rounds with higher ``num_cpus`` and ``num_gpus`` than what is really needed (e.g., ``num_cpus=8`` and, if you have a GPU, ``num_gpus=1``). Then monitor your CPU and GPU utilization. For this, you can make use of tools such as ``htop`` and ``nvidia-smi``. 
If you see overall resource utilization remains low, try lowering ``num_cpus`` and ``num_gpus`` (recall this will make more ``ClientApp`` instances run in parallel) until you see a satisfactory system resource utilization. + + Note that if the workload on your ``ClientApp`` instances is not homogeneous (i.e., some come with a larger compute or memory footprint), you'd probably want to focus on those when coming up with a good value for ``num_gpus`` and ``num_cpus``. + +.. dropdown:: Can I assign different resources to each ``ClientApp`` instance? + + No. All ``ClientApp`` objects are assumed to make use of the same ``num_cpus`` and ``num_gpus``. When setting these values (refer to :ref:`clientappresources` for more details), ensure the ``ClientApp`` with the largest memory footprint (either RAM or VRAM) can run in your system with others like it in parallel. + +.. dropdown:: Can I run a single simulation across multiple compute nodes (e.g., GPU servers)? + + Yes. If you are using the ``RayBackend`` (the *default* backend) you can first interconnect your nodes through Ray's CLI and then launch the simulation. Refer to :ref:`multinodesimulations` for a step-by-step guide. + +.. dropdown:: My ``ServerApp`` also needs to make use of the GPU (e.g., to do evaluation of the *global model* after aggregation). Is this GPU usage taken into account by the ``Simulation Engine``? + + No. The ``Simulation Engine`` only manages ``ClientApps`` and therefore is only aware of the system resources they require. If your ``ServerApp`` makes use of substantial compute or memory resources, take that into account when setting ``num_cpus`` and ``num_gpus``. + +.. dropdown:: Can I indicate on what resource a specific instance of a ``ClientApp`` should run? Can I do resource placement? 
- -Multi-node setups -~~~~~~~~~~~~~~~~~ - -- The VCE does not currently offer a way to control on which node a particular `virtual` - client is executed. In other words, if more than a single node have the resources - needed by a client to run, then any of those nodes could get the client workload - scheduled onto. Later in the FL process (i.e. in a different round) the same client - could be executed by a different node. Depending on how your clients access their - datasets, this might require either having a copy of all dataset partitions on all - nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data - duplication. -- By definition virtual clients are `stateless` due to their ephemeral nature. A client - state can be implemented as part of the Flower client class but users need to ensure - this saved to persistent storage (e.g. a database, disk) and that can be retrieve - later by the same client regardless on which node it is running from. This is related - to the point above also since, in some way, the client's dataset could be seen as a - type of `state`. + Currently, the placement of ``ClientApp`` instances is managed by the ``RayBackend`` (the only backend available as of ``flwr==1.13.0``) and cannot be customized. Implementing a *custom* backend would be a way of achieving resource placement. diff --git a/doc/source/how-to-save-and-load-model-checkpoints.rst b/doc/source/how-to-save-and-load-model-checkpoints.rst index f2f12dae97be..03440f0bb271 100644 --- a/doc/source/how-to-save-and-load-model-checkpoints.rst +++ b/doc/source/how-to-save-and-load-model-checkpoints.rst @@ -1,10 +1,10 @@ -Save and load model checkpoints +Save and Load Model Checkpoints =============================== Flower does not automatically save model updates on the server-side. This how-to guide describes the steps to save (and load) model checkpoints in Flower. 
-Model checkpointing +Model Checkpointing ------------------- Model updates can be persisted on the server-side by customizing ``Strategy`` methods. @@ -21,9 +21,9 @@ returns those aggregated weights to the caller (i.e., the server): def aggregate_fit( self, server_round: int, - results: List[Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics aggregated_parameters, aggregated_metrics = super().aggregate_fit( @@ -31,25 +31,30 @@ returns those aggregated weights to the caller (i.e., the server): ) if aggregated_parameters is not None: - # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + # Convert `Parameters` to `list[np.ndarray]` + aggregated_ndarrays: list[np.ndarray] = fl.common.parameters_to_ndarrays( aggregated_parameters ) - # Save aggregated_ndarrays + # Save aggregated_ndarrays to disk print(f"Saving round {server_round} aggregated_ndarrays...") np.savez(f"round-{server_round}-weights.npz", *aggregated_ndarrays) return aggregated_parameters, aggregated_metrics - # Create strategy and run server - strategy = SaveModelStrategy( - # (same arguments as FedAvg here) - ) - fl.server.start_server(strategy=strategy) + # Create strategy and pass into ServerApp + def server_fn(context): + strategy = SaveModelStrategy( + # (same arguments as FedAvg here) + ) + config = ServerConfig(num_rounds=3) + return ServerAppComponents(strategy=strategy, config=config) -Save and load PyTorch checkpoints + + app = ServerApp(server_fn=server_fn) + +Save and Load PyTorch Checkpoints 
--------------------------------- Similar to the previous example but with a few extra steps, we'll show how to store a @@ -67,9 +72,9 @@ returns a ``Parameters`` object that has to be transformed into a list of NumPy def aggregate_fit( self, server_round: int, - results: List[Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate model weights using weighted average and store checkpoint""" # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics @@ -80,17 +85,17 @@ returns a ``Parameters`` object that has to be transformed into a list of NumPy if aggregated_parameters is not None: print(f"Saving round {server_round} aggregated_parameters...") - # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + # Convert `Parameters` to `list[np.ndarray]` + aggregated_ndarrays: list[np.ndarray] = fl.common.parameters_to_ndarrays( aggregated_parameters ) - # Convert `List[np.ndarray]` to PyTorch`state_dict` + # Convert `list[np.ndarray]` to PyTorch `state_dict` params_dict = zip(net.state_dict().keys(), aggregated_ndarrays) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) net.load_state_dict(state_dict, strict=True) - # Save the model + # Save the model to disk torch.save(net.state_dict(), f"model_round_{server_round}.pth") return aggregated_parameters, aggregated_metrics @@ -110,3 +115,10 @@ this will iterate over all saved checkpoints and load the latest one: Return/use this object of type ``Parameters`` wherever necessary, such as in the ``initial_parameters`` when defining a ``Strategy``. 
+ +Alternatively, we can save and load the model updates during the evaluation phase by +overriding the ``evaluate()`` or ``aggregate_evaluate()`` method of the strategy +(``FedAvg``). Check out the details in `Advanced PyTorch Example +`_ and `Advanced +TensorFlow Example +`_. diff --git a/doc/source/how-to-upgrade-to-flower-1.0.rst b/doc/source/how-to-upgrade-to-flower-1.0.rst index 5f10f16a551f..7643c347ac0f 100644 --- a/doc/source/how-to-upgrade-to-flower-1.0.rst +++ b/doc/source/how-to-upgrade-to-flower-1.0.rst @@ -1,6 +1,15 @@ Upgrade to Flower 1.0 ===================== +.. note:: + + This guide is for users who have already worked with Flower 0.x and want to upgrade + to Flower 1.0. Newer versions of Flower (1.13 and later) are based on a new + architecture and are not covered in this guide. After upgrading Flower 0.x projects to + Flower 1.0, please refer to :doc:`Upgrade to Flower 1.13 + ` to make your project compatible with the latest + version of Flower. + Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few breaking changes that make it necessary to change the code of existing 0.x-series diff --git a/doc/source/how-to-upgrade-to-flower-1.13.rst b/doc/source/how-to-upgrade-to-flower-1.13.rst new file mode 100644 index 000000000000..41cb6efc8341 --- /dev/null +++ b/doc/source/how-to-upgrade-to-flower-1.13.rst @@ -0,0 +1,465 @@ +Upgrade to Flower 1.13 +====================== + +Welcome to the migration guide for updating Flower to Flower 1.13! Whether you're a +seasoned user or just getting started, this guide will help you smoothly transition your +existing setup to take advantage of the latest features and improvements in Flower 1.13. + +.. note:: + + This guide shows how to make pre-``1.13`` Flower code compatible with Flower 1.13 + (and later) with only minimal code changes. + +Let's dive in! + +.. + Generate link text as literal. 
Refs: + - https://stackoverflow.com/q/71651598 + - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 + +.. |clientapp_link| replace:: ``ClientApp`` + +.. |serverapp_link| replace:: ``ServerApp`` + +.. |runsim_link| replace:: ``run_simulation()`` + +.. |flower_superlink_link| replace:: ``flower-superlink`` + +.. |flower_supernode_link| replace:: ``flower-supernode`` + +.. |flower_architecture_link| replace:: Flower Architecture + +.. |flower_how_to_run_simulations_link| replace:: How-to Run Simulations + +.. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _flower_architecture_link: explanation-flower-architecture.html + +.. _flower_how_to_run_simulations_link: how-to-run-simulations.html + +.. _flower_superlink_link: ref-api-cli.html#flower-superlink + +.. _flower_supernode_link: ref-api-cli.html#flower-supernode + +.. _runsim_link: ref-api/flwr.simulation.run_simulation.html + +.. _serverapp_link: ref-api/flwr.server.ServerApp.html + +Install update +-------------- + +Here's how to update an existing installation of Flower to Flower 1.13 with ``pip``: + +.. code-block:: bash + + $ python -m pip install -U flwr + +or if you need Flower 1.13 with simulation: + +.. code-block:: bash + + $ python -m pip install -U "flwr[simulation]" + +Ensure you set the following version constraint in your ``requirements.txt``: + +.. code-block:: + + # Without simulation support + flwr>=1.13,<2.0 + + # With simulation support + flwr[simulation]>=1.13,<2.0 + +or ``pyproject.toml``: + +.. code-block:: toml + + # Without simulation support + dependencies = [ + "flwr>=1.13,<2.0", + ] + + # With simulation support + dependencies = [ + "flwr[simulation]>=1.13,<2.0", + ] + +Required changes +---------------- + +Starting with Flower 1.8, the *infrastructure* and *application layers* have been +decoupled. Flower 1.13 enforces this separation further. Among other things, this allows +you to run the exact same code in a simulation as in a real deployment. 
+ +Instead of starting a client in code via ``start_client()``, you create a +|clientapp_link|_. Instead of starting a server in code via ``start_server()``, you +create a |serverapp_link|_. Both ``ClientApp`` and ``ServerApp`` are started by the +long-running components of the server and client: the `SuperLink` and `SuperNode`, +respectively. + +.. tip:: + + For more details on SuperLink and SuperNode, please see the + |flower_architecture_link|_ . + +The following non-breaking changes require manual updates and allow you to run your +project both in the traditional (now deprecated) way and in the new (recommended) Flower +1.13 way: + +|clientapp_link|_ +~~~~~~~~~~~~~~~~~ + +- Wrap your existing client with |clientapp_link|_ instead of launching it via + ``start_client()``. Here's an example: + +.. code-block:: python + :emphasize-lines: 6,10 + + from flwr.client import ClientApp, start_client + from flwr.common import Context + + + # Flower 1.10 and later (recommended) + def client_fn(context: Context): + return FlowerClient().to_client() + + + app = ClientApp(client_fn=client_fn) + + + # # Flower 1.8 - 1.9 (deprecated, no longer supported) + # def client_fn(cid: str): + # return FlowerClient().to_client() + # + # + # app = ClientApp(client_fn=client_fn) + + + # Flower 1.7 (deprecated, only for backwards-compatibility) + if __name__ == "__main__": + start_client( + server_address="127.0.0.1:8080", + client=FlowerClient().to_client(), + ) + +|serverapp_link|_ +~~~~~~~~~~~~~~~~~ + +- Wrap your existing strategy with |serverapp_link|_ instead of starting the server via + ``start_server()``. Here's an example: + +.. 
code-block:: python + :emphasize-lines: 7,13 + + from flwr.common import Context + from flwr.server import ServerApp, ServerAppComponents, ServerConfig, start_server + from flwr.server.strategy import FedAvg + + + # Flower 1.10 and later (recommended) + def server_fn(context: Context): + strategy = FedAvg() + config = ServerConfig() + return ServerAppComponents(config=config, strategy=strategy) + + + app = ServerApp(server_fn=server_fn) + + + # # Flower 1.8 - 1.9 (deprecated, no longer supported) + # app = flwr.server.ServerApp( + # config=config, + # strategy=strategy, + # ) + + + # Flower 1.7 (deprecated, only for backwards-compatibility) + if __name__ == "__main__": + start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) + +Deployment +~~~~~~~~~~ + +- In a terminal window, start the SuperLink using |flower_superlink_link|_. Then, in two + additional terminal windows, start two SuperNodes using |flower_supernode_link|_ (2x). + There is no need to directly run ``client.py`` and ``server.py`` as Python scripts. +- Here's an example to start the server without HTTPS (insecure mode, only for + prototyping): + +.. tip:: + + For a comprehensive walk-through on how to deploy Flower using Docker, please refer + to the :doc:`docker/index` guide. + +.. code-block:: bash + :emphasize-lines: 2,5,12 + + # Start a SuperLink + $ flower-superlink --insecure + + # In a new terminal window, start a long-running SuperNode + $ flower-supernode \ + --insecure \ + --superlink 127.0.0.1:9092 \ + --clientappio-api-address 127.0.0.1:9094 \ + + + # In another terminal window, start another long-running SuperNode (at least 2 SuperNodes are required) + $ flower-supernode \ + --insecure \ + --superlink 127.0.0.1:9092 \ + --clientappio-api-address 127.0.0.1:9095 \ + + +- Here's another example to start both SuperLink and SuperNodes with HTTPS. 
Use the + ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line options + to pass paths to (CA certificate, server certificate, and server private key). + +.. code-block:: bash + :emphasize-lines: 2,8,15 + + # Start a secure SuperLink + $ flower-superlink \ + --ssl-ca-certfile \ + --ssl-certfile \ + --ssl-keyfile + + # In a new terminal window, start a long-running SuperNode + $ flower-supernode \ + --superlink 127.0.0.1:9092 \ + --clientappio-api-address 127.0.0.1:9094 \ + --root-certificates \ + + + # In another terminal window, start another long-running SuperNode (at least 2 SuperNodes are required) + $ flower-supernode \ + --superlink 127.0.0.1:9092 \ + --clientappio-api-address 127.0.0.1:9095 \ + --root-certificates \ + + +Simulation (CLI) +~~~~~~~~~~~~~~~~ + +Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, +respectively. There is no need to use ``start_simulation()`` anymore. Here's an example: + +.. tip:: + + For a comprehensive guide on how to setup and run Flower simulations please read the + |flower_how_to_run_simulations_link|_ guide. + +.. code-block:: python + :emphasize-lines: 9,15,19,22,28 + + from flwr.client import ClientApp + from flwr.common import Context + from flwr.server import ServerApp, ServerAppComponents, ServerConfig + from flwr.server.strategy import FedAvg + from flwr.simulation import start_simulation + + + # Regular Flower client implementation + class FlowerClient(NumPyClient): + # ... + pass + + + # Flower 1.10 and later (recommended) + def client_fn(context: Context): + return FlowerClient().to_client() + + + app = ClientApp(client_fn=client_fn) + + + def server_fn(context: Context): + strategy = FedAvg(...) + config = ServerConfig(...) 
+ return ServerAppComponents(strategy=strategy, config=config) + + + server_app = ServerApp(server_fn=server_fn) + + + # # Flower 1.8 - 1.9 (deprecated, no longer supported) + # def client_fn(cid: str): + # return FlowerClient().to_client() + # + # + # client_app = ClientApp(client_fn=client_fn) + # + # + # server_app = ServerApp( + # config=config, + # strategy=strategy, + # ) + + + # Flower 1.7 (deprecated, only for backwards-compatibility) + if __name__ == "__main__": + hist = start_simulation( + num_clients=10, + # ... + ) + +Depending on your Flower version, you can run your simulation as follows: + +- For Flower 1.11 and later, run ``flwr run`` in the terminal. This is the recommended + way to start simulations, other ways are deprecated and no longer recommended. +- DEPRECATED For Flower versions between 1.8 and 1.10, run ``flower-simulation`` in the + terminal and point to the ``server_app`` / ``client_app`` object in the code instead + of executing the Python script. In the code snippet below, there is an example + (assuming the ``server_app`` and ``client_app`` objects are in a ``sim.py`` module). +- DEPRECATED For Flower versions before 1.8, run the Python script directly. + +.. code-block:: bash + :emphasize-lines: 2 + + # Flower 1.11 and later (recommended) + $ flwr run + + + # # Flower 1.8 - 1.10 (deprecated, no longer supported) + # $ flower-simulation \ + # --server-app=sim:server_app \ + # --client-app=sim:client_app \ + # --num-supernodes=10 + + + # Flower 1.7 (deprecated) + $ python sim.py + +Depending on your Flower version, you can also define the default resources as follows: + +- For Flower 1.11 and later, you can edit your ``pyproject.toml`` file and then run + ``flwr run`` in the terminal as shown in the example below. 
+- DEPRECATED For Flower versions between 1.8 and 1.10, you can adjust the resources for + each |clientapp_link|_ using the ``--backend-config`` command line argument instead of + setting the ``client_resources`` argument in ``start_simulation()``. +- DEPRECATED For Flower versions before 1.8, you need to run ``start_simulation()`` and + pass a dictionary of the required resources to the ``client_resources`` argument. + +.. code-block:: bash + :emphasize-lines: 2,8 + + # Flower 1.11 and later (recommended) + # [file: pyproject.toml] + [tool.flwr.federations.local-sim-gpu] + options.num-supernodes = 10 + options.backend.client-resources.num-cpus = 2 + options.backend.client-resources.num-gpus = 0.25 + + $ flwr run + + # # Flower 1.8 - 1.10 (deprecated, no longer supported) + # $ flower-simulation \ + # --client-app=sim:client_app \ + # --server-app=sim:server_app \ + # --num-supernodes=10 \ + # --backend-config='{"client_resources": {"num_cpus": 2, "num_gpus": 0.25}}' + +.. code-block:: python + + # Flower 1.7 (in `sim.py`, deprecated) + if __name__ == "__main__": + hist = start_simulation( + num_clients=10, client_resources={"num_cpus": 2, "num_gpus": 0.25}, ... + ) + +Simulation (Notebook) +~~~~~~~~~~~~~~~~~~~~~ + +To run your simulation from within a notebook, please consider the following examples +depending on your Flower version: + +- For Flower 1.11 and later, you need to run |runsim_link|_ in your notebook instead of + ``start_simulation()``. +- DEPRECATED For Flower versions between 1.8 and 1.10, you need to run |runsim_link|_ in + your notebook instead of ``start_simulation()`` and configure the resources. +- DEPRECATED For Flower versions before 1.8, you need to run ``start_simulation()`` and + pass a dictionary of the required resources to the ``client_resources`` argument. + +.. tip:: + + For a comprehensive guide on how to setup and run Flower simulations please read the + |flower_how_to_run_simulations_link|_ guide. + +.. 
code-block:: python + :emphasize-lines: 10,12,14-17 + + from flwr.client import ClientApp + from flwr.common import Context + from flwr.server import ServerApp + from flwr.simulation import run_simulation, start_simulation + + + # Flower 1.10 and later (recommended) + # Omitted: client_fn and server_fn + + client_app = ClientApp(client_fn=client_fn) + + server_app = ServerApp(server_fn=server_fn) + + run_simulation( + server_app=server_app, + client_app=client_app, + ) + + + # # Flower v1.8 - v1.10 (deprecated, no longer supported) + # NUM_CLIENTS = 10 # Replace by any integer greater than zero + # backend_config = {"client_resources": {"num_cpus": 2, "num_gpus": 0.25}} + # + # + # def client_fn(cid: str): + # # ... + # return FlowerClient().to_client() + # + # + # client_app = ClientApp(client_fn=client_fn) + # + # server_app = ServerApp( + # config=config, + # strategy=strategy, + # ) + # + # run_simulation( + # server_app=server_app, + # client_app=client_app, + # num_supernodes=NUM_CLIENTS, + # backend_config=backend_config, + # ) + + + # Flower v1.7 (deprecated) + NUM_CLIENTS = 10 # Replace by any integer greater than zero + backend_config = {"client_resources": {"num_cpus": 2, "num_gpus": 0.25}} + start_simulation( + client_fn=client_fn, + num_clients=NUM_CLIENTS, + config=config, + strategy=strategy, + client_resources=backend_config["client_resources"], + ) + +Further help +------------ + +Most official `Flower code examples `_ are already +updated to Flower 1.13 so they can serve as a reference for using the Flower 1.13 API. +If there are further questions, `join the Flower Slack `_ +(and use the channel ``#questions``) or post them on `Flower Discuss +`_ where you can find the community posting and answering +questions. + +.. admonition:: Important + + As we continuously enhance Flower at a rapid pace, we'll be periodically updating + this guide. Please feel free to share any feedback with us! + +Happy migrating! 
🚀 diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst deleted file mode 100644 index 9a476f9865e1..000000000000 --- a/doc/source/how-to-upgrade-to-flower-next.rst +++ /dev/null @@ -1,366 +0,0 @@ -Upgrade to Flower Next -====================== - -Welcome to the migration guide for updating Flower to Flower Next! Whether you're a -seasoned user or just getting started, this guide will help you smoothly transition your -existing setup to take advantage of the latest features and improvements in Flower Next, -starting from version 1.8. - -.. note:: - - This guide shows how to reuse pre-``1.8`` Flower code with minimum code changes by - using the *compatibility layer* in Flower Next. In another guide, we will show how - to run Flower Next end-to-end with pure Flower Next APIs. - -Let's dive in! - -.. - Generate link text as literal. Refs: - - https://stackoverflow.com/q/71651598 - - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 - -.. |clientapp_link| replace:: ``ClientApp()`` - -.. |serverapp_link| replace:: ``ServerApp()`` - -.. |startclient_link| replace:: ``start_client()`` - -.. |startserver_link| replace:: ``start_server()`` - -.. |startsim_link| replace:: ``start_simulation()`` - -.. |runsim_link| replace:: ``run_simulation()`` - -.. |flowernext_superlink_link| replace:: ``flower-superlink`` - -.. |flowernext_clientapp_link| replace:: ``flower-client-app`` - -.. |flowernext_serverapp_link| replace:: ``flower-server-app`` - -.. |flower_simulation_link| replace:: ``flower-simulation`` - -.. _clientapp_link: ref-api/flwr.client.ClientApp.html - -.. _flower_simulation_link: ref-api-cli.html#flower-simulation - -.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app - -.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app - -.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink - -.. _runsim_link: ref-api/flwr.simulation.run_simulation.html - -.. 
_serverapp_link: ref-api/flwr.server.ServerApp.html - -.. _startclient_link: ref-api/flwr.client.start_client.html - -.. _startserver_link: ref-api/flwr.server.start_server.html - -.. _startsim_link: ref-api/flwr.simulation.start_simulation.html - -Install update --------------- - -Using pip -~~~~~~~~~ - -Here's how to update an existing installation of Flower to Flower Next with ``pip``: - -.. code-block:: bash - - $ python -m pip install -U flwr - -or if you need Flower Next with simulation: - -.. code-block:: bash - - $ python -m pip install -U "flwr[simulation]" - -Ensure you set the following version constraint in your ``requirements.txt`` - -.. code-block:: - - # Without simulation support - flwr>=1.8,<2.0 - - # With simulation support - flwr[simulation]>=1.8, <2.0 - -or ``pyproject.toml``: - -.. code-block:: toml - - # Without simulation support - dependencies = ["flwr>=1.8,2.0"] - - # With simulation support - dependencies = ["flwr[simulation]>=1.8,2.0"] - -Using Poetry -~~~~~~~~~~~~ - -Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to -delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). - -Ensure you set the following version constraint in your ``pyproject.toml``: - -.. code-block:: toml - :substitutions: - - [tool.poetry.dependencies] - python = "^|python_version|" - - # Without simulation support - flwr = ">=1.8,<2.0" - - # With simulation support - flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } - -Required changes ----------------- - -In Flower Next, the *infrastructure* and *application layers* have been decoupled. -Instead of starting a client in code via ``start_client()``, you create a -|clientapp_link|_ and start it via the command line. Instead of starting a server in -code via ``start_server()``, you create a |serverapp_link|_ and start it via the command -line. The long-running components of server and client are called SuperLink and -SuperNode. 
The following non-breaking changes that require manual updates and allow you -to run your project both in the traditional way and in the Flower Next way: - -|clientapp_link|_ -~~~~~~~~~~~~~~~~~ - -- Wrap your existing client with |clientapp_link|_ instead of launching it via - |startclient_link|_. Here's an example: - -.. code-block:: python - :emphasize-lines: 5,11 - - # Flower 1.8 - def client_fn(cid: str): - return flwr.client.FlowerClient().to_client() - - - app = flwr.client.ClientApp( - client_fn=client_fn, - ) - - # Flower 1.7 - if __name__ == "__main__": - flwr.client.start_client( - server_address="127.0.0.1:8080", - client=flwr.client.FlowerClient().to_client(), - ) - -|serverapp_link|_ -~~~~~~~~~~~~~~~~~ - -- Wrap your existing strategy with |serverapp_link|_ instead of starting the server via - |startserver_link|_. Here's an example: - -.. code-block:: python - :emphasize-lines: 2,9 - - # Flower 1.8 - app = flwr.server.ServerApp( - config=config, - strategy=strategy, - ) - - # Flower 1.7 - if __name__ == "__main__": - flwr.server.start_server( - server_address="0.0.0.0:8080", - config=config, - strategy=strategy, - ) - -Deployment -~~~~~~~~~~ - -- Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in sequence, - |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need - to execute `client.py` and `server.py` as Python scripts. -- Here's an example to start the server without HTTPS (only for prototyping): - -.. 
code-block:: bash - - # Start a Superlink - $ flower-superlink --insecure - - # In a new terminal window, start a long-running SuperNode - $ flower-client-app client:app --insecure - - # In another terminal window, start another long-running SuperNode (at least 2 SuperNodes are required) - $ flower-client-app client:app --insecure - - # In yet another terminal window, run the ServerApp (this starts the actual training run) - $ flower-server-app server:app --insecure - -- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, - ``--ssl-certfile``, and ``--ssl-keyfile`` command line options to pass paths to (CA - certificate, server certificate, and server private key). - -.. code-block:: bash - - # Start a secure Superlink - $ flower-superlink \ - --ssl-ca-certfile \ - --ssl-certfile \ - --ssl-keyfile - - # In a new terminal window, start a long-running secure SuperNode - $ flower-client-app client:app \ - --root-certificates \ - --superlink 127.0.0.1:9092 - - # In another terminal window, start another long-running secure SuperNode (at least 2 SuperNodes are required) - $ flower-client-app client:app \ - --root-certificates \ - --superlink 127.0.0.1:9092 - - # In yet another terminal window, run the ServerApp (this starts the actual training run) - $ flower-server-app server:app \ - --root-certificates \ - --superlink 127.0.0.1:9091 - -Simulation in CLI -~~~~~~~~~~~~~~~~~ - -- Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, - respectively. There is no need to use |startsim_link|_ anymore. Here's an example: - -.. code-block:: python - :emphasize-lines: 9,13,20 - - # Regular Flower client implementation - class FlowerClient(NumPyClient): - # ... 
- pass - - - # Flower 1.8 - def client_fn(cid: str): - return FlowerClient().to_client() - - - client_app = flwr.client.ClientApp( - client_fn=client_fn, - ) - - server_app = flwr.server.ServerApp( - config=config, - strategy=strategy, - ) - - # Flower 1.7 - if __name__ == "__main__": - hist = flwr.simulation.start_simulation( - num_clients=100, - # ... - ) - -- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` - object in the code instead of executing the Python script. Here's an example (assuming - the ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): - -.. code-block:: bash - - # Flower 1.8 - $ flower-simulation \ - --server-app=sim:server_app \ - --client-app=sim:client_app \ - --num-supernodes=100 - -.. code-block:: bash - - # Flower 1.7 - $ python sim.py - -- Set default resources for each |clientapp_link|_ using the ``--backend-config`` - command line argument instead of setting the ``client_resources`` argument in - |startsim_link|_. Here's an example: - -.. code-block:: bash - :emphasize-lines: 6 - - # Flower 1.8 - $ flower-simulation \ - --client-app=sim:client_app \ - --server-app=sim:server_app \ - --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus": 2, "num_gpus": 0.25}}' - -.. code-block:: python - :emphasize-lines: 5 - - # Flower 1.7 (in `sim.py`) - if __name__ == "__main__": - hist = flwr.simulation.start_simulation( - num_clients=100, client_resources={"num_cpus": 2, "num_gpus": 0.25}, ... - ) - -Simulation in a Notebook -~~~~~~~~~~~~~~~~~~~~~~~~ - -- Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an example: - -.. code-block:: python - :emphasize-lines: 19,27 - - NUM_CLIENTS = 10 # Replace by any integer greater than zero - - - def client_fn(cid: str): - # ... 
- return FlowerClient().to_client() - - - client_app = flwr.client.ClientApp( - client_fn=client_fn, - ) - - server_app = flwr.server.ServerApp( - config=config, - strategy=strategy, - ) - - backend_config = {"client_resources": {"num_cpus": 2, "num_gpus": 0.25}} - - # Flower 1.8 - flwr.simulation.run_simulation( - server_app=server_app, - client_app=client_app, - num_supernodes=NUM_CLIENTS, - backend_config=backend_config, - ) - - # Flower 1.7 - flwr.simulation.start_simulation( - client_fn=client_fn, - num_clients=NUM_CLIENTS, - config=config, - strategy=strategy, - client_resources=backend_config["client_resources"], - ) - -Further help ------------- - -Some official `Flower code examples `_ are already -updated to Flower Next so they can serve as a reference for using the Flower Next API. -If there are further questions, `join the Flower Slack `_ -and use the channel ``#questions``. You can also `participate in Flower Discuss -`_ where you can find us answering questions, or share and -learn from others about migrating to Flower Next. - -.. admonition:: Important - - As we continuously enhance Flower Next at a rapid pace, we'll be periodically - updating this guide. Please feel free to share any feedback with us! - -.. - [TODO] Add links to Flower Next 101 and Flower Glossary - -Happy migrating! 🚀 diff --git a/doc/source/how-to-use-differential-privacy.rst b/doc/source/how-to-use-differential-privacy.rst index 67e54271bb2e..687f6a908bfa 100644 --- a/doc/source/how-to-use-differential-privacy.rst +++ b/doc/source/how-to-use-differential-privacy.rst @@ -47,10 +47,10 @@ the corresponding input parameters. .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg # Create the strategy - strategy = fl.server.strategy.FedAvg(...) + strategy = FedAvg(...) 
# Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper dp_strategy = DifferentialPrivacyServerSideFixedClipping( @@ -81,10 +81,10 @@ wrapper class and, on the client, ``fixedclipping_mod``: .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg # Create the strategy - strategy = fl.server.strategy.FedAvg(...) + strategy = FedAvg(...) # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper dp_strategy = DifferentialPrivacyClientSideFixedClipping( @@ -99,10 +99,11 @@ the matching ``fixedclipping_mod`` to perform the client-side clipping: .. code-block:: python + from flwr.client import ClientApp from flwr.client.mod import fixedclipping_mod # Add fixedclipping_mod to the client-side mods - app = fl.client.ClientApp( + app = ClientApp( client_fn=client_fn, mods=[ fixedclipping_mod, @@ -126,15 +127,18 @@ Below is a code example that shows how to use ``LocalDpMod``: .. code-block:: python - from flwr.client.mod.localdp_mod import LocalDpMod + from flwr.client import ClientApp + from flwr.client.mod import LocalDpMod # Create an instance of the mod with the required params local_dp_obj = LocalDpMod(cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta) - # Add local_dp_obj to the client-side mods - app = fl.client.ClientApp( + # Add local_dp_obj to the client-side mods + app = ClientApp( client_fn=client_fn, - mods=[local_dp_obj], + mods=[ + local_dp_obj, + ], ) Please note that the order of mods, especially those that modify parameters, is @@ -148,4 +152,4 @@ For ensuring data instance-level privacy during local model training on the clie consider leveraging privacy engines such as Opacus and TensorFlow Privacy. For examples of using Flower with these engines, please refer to the Flower examples directory (`Opacus `_, `Tensorflow -Privacy `_). +Privacy `_). 
diff --git a/doc/source/how-to-use-strategies.rst b/doc/source/how-to-use-strategies.rst index b4803c6059b3..2dbdd4f04704 100644 --- a/doc/source/how-to-use-strategies.rst +++ b/doc/source/how-to-use-strategies.rst @@ -14,49 +14,75 @@ the server side: Use an existing strategy ------------------------ -Flower comes with a number of popular federated learning strategies built-in. A built-in -strategy can be instantiated as follows: +Flower comes with a number of popular federated learning Strategies which can be +instantiated as follows: .. code-block:: python - import flwr as fl + from flwr.common import Context + from flwr.server.strategy import FedAvg + from flwr.server import ServerApp, ServerAppComponents, ServerConfig - strategy = fl.server.strategy.FedAvg() - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -This creates a strategy with all parameters left at their default values and passes it -to the ``start_server`` function. It is usually recommended to adjust a few parameters -during instantiation: + def server_fn(context: Context): + # Optional context-based parameters specification + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) -.. code-block:: python + # Instantiate FedAvg strategy + strategy = FedAvg( + fraction_fit=context.run_config["fraction-fit"], + fraction_evaluate=1.0, + ) + + # Create and return ServerAppComponents + return ServerAppComponents(strategy=strategy, config=config) + + + # Create ServerApp + app = ServerApp(server_fn=server_fn) + +To make the ``ServerApp`` use this strategy, pass a ``server_fn`` function to the +``ServerApp`` constructor. The ``server_fn`` function should return a +``ServerAppComponents`` object that contains the strategy instance and a +``ServerConfig`` instance. + +Both ``Strategy`` and ``ServerConfig`` classes can be configured with parameters. 
The +``Context`` object passed to ``server_fn`` contains the values specified in the +``[tool.flwr.app.config]`` table in your ``pyproject.toml`` (a snippet is shown below). +To access these values, use ``context.run_config``. - import flwr as fl +.. code-block:: toml - strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for the next round - min_fit_clients=10, # Minimum number of clients to be sampled for the next round - min_available_clients=80, # Minimum number of clients that need to be connected to the server before a training round can start - ) - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) + # ... + + [tool.flwr.app.config] + num-server-rounds = 10 + fraction-fit = 0.5 + + # ... Customize an existing strategy with callback functions ------------------------------------------------------ -Existing strategies provide several ways to customize their behaviour. Callback -functions allow strategies to call user-provided code during execution. +Existing strategies provide several ways to customize their behavior. Callback functions +allow strategies to call user-provided code during execution. This approach enables you +to modify the strategy's partial behavior without rewriting the whole class from zero. Configuring client fit and client evaluate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The server can pass new configuration values to the client each round by providing a function to ``on_fit_config_fn``. The provided function will be called by the strategy -and must return a dictionary of configuration key values pairs that will be sent to the +and must return a dictionary of configuration key value pairs that will be sent to the client. It must return a dictionary of arbitrary configuration values ``client.fit`` and ``client.evaluate`` functions during each round of federated learning. .. 
code-block:: python - import flwr as fl + from flwr.common import Context + from flwr.server.strategy import FedAvg + from flwr.server import ServerApp, ServerAppComponents, ServerConfig def get_on_fit_config_fn() -> Callable[[int], Dict[str, str]]: @@ -73,18 +99,32 @@ client. It must return a dictionary of arbitrary configuration values ``client.f return fit_config - strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, - min_fit_clients=10, - min_available_clients=80, - on_fit_config_fn=get_on_fit_config_fn(), - ) - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) + def server_fn(context: Context): + # Read num_rounds from context + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + # Instantiate FedAvg strategy + strategy = FedAvg( + fraction_fit=context.run_config["fraction-fit"], + fraction_evaluate=1.0, + on_fit_config_fn=get_on_fit_config_fn(), + ) + + # Create and return ServerAppComponents + return ServerAppComponents(strategy=strategy, config=config) + + + # Create ServerApp + app = ServerApp(server_fn=server_fn) The ``on_fit_config_fn`` can be used to pass arbitrary configuration values from server -to client, and potentially change these values each round, for example, to adjust the +to client and potentially change these values each round, for example, to adjust the learning rate. The client will receive the dictionary returned by the -``on_fit_config_fn`` in its own ``client.fit()`` function. +``on_fit_config_fn`` in its own ``client.fit()`` function. And while the values can be +also passed directly via the context this function can be a place to implement finer +control over the `fit` behaviour that may not be achieved by the context, which sets +fixed values. 
Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` to customize the configuration sent to ``client.evaluate()`` diff --git a/doc/source/index.rst b/doc/source/index.rst index 197599d595a8..8695935acb04 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,27 +88,20 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. how-to-install-flower how-to-configure-clients + how-to-design-stateful-clients how-to-use-strategies how-to-implement-strategies how-to-aggregate-evaluation-results how-to-save-and-load-model-checkpoints how-to-run-simulations - how-to-monitor-simulation - how-to-configure-logging - how-to-enable-ssl-connections + how-to-enable-tls-connections how-to-use-built-in-mods how-to-use-differential-privacy how-to-authenticate-supernodes + how-to-implement-fedbn docker/index how-to-upgrade-to-flower-1.0 - how-to-upgrade-to-flower-next - -.. toctree:: - :maxdepth: 1 - :caption: Legacy example guides - - example-pytorch-from-centralized-to-federated - example-fedbn-pytorch-from-centralized-to-federated + how-to-upgrade-to-flower-1.13 Explanations ~~~~~~~~~~~~ @@ -135,7 +128,7 @@ Information-oriented API reference and other reference material. :caption: API reference :recursive: - flwr + flwr .. toctree:: :maxdepth: 2 @@ -186,7 +179,6 @@ along the way. :maxdepth: 1 :caption: Contributor references - fed/index contributor-ref-good-first-contributions contributor-ref-secure-aggregation-protocols diff --git a/doc/source/ref-api-cli.rst b/doc/source/ref-api-cli.rst index e95132bbadba..01f07f0893a9 100644 --- a/doc/source/ref-api-cli.rst +++ b/doc/source/ref-api-cli.rst @@ -1,29 +1,22 @@ Flower CLI reference ==================== +Basic Commands +-------------- + .. _flwr-apiref: -flwr CLI --------- +``flwr`` CLI +~~~~~~~~~~~~ .. click:: flwr.cli.app:typer_click_object :prog: flwr :nested: full -.. _flower-simulation-apiref: - -flower-simulation ------------------ - -.. 
argparse:: - :module: flwr.simulation.run_simulation - :func: _parse_args_run_simulation - :prog: flower-simulation - .. _flower-superlink-apiref: -flower-superlink ----------------- +``flower-superlink`` +~~~~~~~~~~~~~~~~~~~~ .. argparse:: :module: flwr.server.app @@ -32,38 +25,77 @@ flower-superlink .. _flower-supernode-apiref: -flower-supernode ----------------- +``flower-supernode`` +~~~~~~~~~~~~~~~~~~~~ .. argparse:: :module: flwr.client.supernode.app :func: _parse_args_run_supernode :prog: flower-supernode -.. _flower-server-app-apiref: - -flower-server-app +Advanced Commands ----------------- -.. note:: +.. _flwr-serverapp-apiref: - Note that since version ``1.11.0``, ``flower-server-app`` no longer supports passing - a reference to a `ServerApp` attribute. Instead, you need to pass the path to Flower - app via the argument ``--app``. This is the path to a directory containing a - `pyproject.toml`. You can create a valid Flower app by executing ``flwr new`` and - following the prompt. +``flwr-serverapp`` +~~~~~~~~~~~~~~~~~~ .. argparse:: - :module: flwr.server.run_serverapp - :func: _parse_args_run_server_app - :prog: flower-server-app + :module: flwr.server.serverapp.app + :func: _parse_args_run_flwr_serverapp + :prog: flwr-serverapp -.. _flower-superexec-apiref: +.. _flwr-clientapp-apiref: + +``flwr-clientapp`` +~~~~~~~~~~~~~~~~~~ + +.. argparse:: + :module: flwr.client.clientapp.app + :func: _parse_args_run_flwr_clientapp + :prog: flwr-clientapp + +Technical Commands +------------------ + +.. _flower-simulation-apiref: -flower-superexec ----------------- +``flower-simulation`` +~~~~~~~~~~~~~~~~~~~~~ .. argparse:: - :module: flwr.superexec.app - :func: _parse_args_run_superexec - :prog: flower-superexec + :module: flwr.simulation.run_simulation + :func: _parse_args_run_simulation + :prog: flower-simulation + +Deprecated Commands +------------------- + +.. _flower-server-app-apiref: + +``flower-server-app`` +~~~~~~~~~~~~~~~~~~~~~ + +.. 
warning:: + + Note that from version ``1.13.0``, ``flower-server-app`` is deprecated. Instead, you + only need to execute |flwr_run_link|_ to start the run. + +.. _flower-superexec-apiref: + +``flower-superexec`` +~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + Note that from version ``1.13.0``, ``flower-superexec`` is deprecated. Instead, you + only need to execute |flower_superlink_link|_. + +.. |flower_superlink_link| replace:: ``flower-superlink`` + +.. |flwr_run_link| replace:: ``flwr run`` + +.. _flower_superlink_link: ref-api-cli.html#flower-superlink + +.. _flwr_run_link: ref-api-cli.html#flwr-run diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index f88a75feabd3..a4b957aa178d 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -1,5 +1,138 @@ # Changelog +## v1.13.1 (2024-11-26) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Heng Pan`, `Javier`, `Robert Steiner` + +### What's new? + +- **Fix `SimulationEngine` Executor for SuperLink** ([#4563](https://github.com/adap/flower/pull/4563), [#4568](https://github.com/adap/flower/pull/4568), [#4570](https://github.com/adap/flower/pull/4570)) + + Resolved an issue that prevented SuperLink from functioning correctly when using the `SimulationEngine` executor. + +- **Improve FAB build and install** ([#4571](https://github.com/adap/flower/pull/4571)) + + An updated FAB build and install process produces smaller FAB files and doesn't rely on `pip install` any more. It also resolves an issue where all files were unnecessarily included in the FAB file. The `flwr` CLI commands now correctly pack only the necessary files, such as `.md`, `.toml` and `.py`, ensuring more efficient and accurate packaging. 
+ +- **Update** `embedded-devices` **example** ([#4381](https://github.com/adap/flower/pull/4381)) + + The example now uses the `flwr run` command and the Deployment Engine. + +- **Update Documentation** ([#4566](https://github.com/adap/flower/pull/4566), [#4569](https://github.com/adap/flower/pull/4569), [#4560](https://github.com/adap/flower/pull/4560), [#4556](https://github.com/adap/flower/pull/4556), [#4581](https://github.com/adap/flower/pull/4581), [#4537](https://github.com/adap/flower/pull/4537), [#4562](https://github.com/adap/flower/pull/4562), [#4582](https://github.com/adap/flower/pull/4582)) + + Enhanced documentation across various aspects, including updates to translation workflows, Docker-related READMEs, and recommended datasets. Improvements also include formatting fixes for dataset partitioning docs and better references to resources in the datasets documentation index. + +- **Update Infrastructure and CI/CD** ([#4577](https://github.com/adap/flower/pull/4577), [#4578](https://github.com/adap/flower/pull/4578), [#4558](https://github.com/adap/flower/pull/4558), [#4551](https://github.com/adap/flower/pull/4551), [#3356](https://github.com/adap/flower/pull/3356), [#4559](https://github.com/adap/flower/pull/4559), [#4575](https://github.com/adap/flower/pull/4575)) + +- **General improvements** ([#4557](https://github.com/adap/flower/pull/4557), [#4564](https://github.com/adap/flower/pull/4564), [#4573](https://github.com/adap/flower/pull/4573), [#4561](https://github.com/adap/flower/pull/4561), [#4579](https://github.com/adap/flower/pull/4579), [#4572](https://github.com/adap/flower/pull/4572)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. 
+ +## v1.13.0 (2024-11-20) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Dimitris Stripelis`, `Heng Pan`, `Javier`, `Mohammad Naseri`, `Robert Steiner`, `Waris Gill`, `William Lindskog`, `Yan Gao`, `Yao Xu`, `wwjang` + +### What's new? + +- **Introduce `flwr ls` command** ([#4460](https://github.com/adap/flower/pull/4460), [#4459](https://github.com/adap/flower/pull/4459), [#4477](https://github.com/adap/flower/pull/4477)) + + The `flwr ls` command is now available to display details about all runs (or one specific run). It supports the following usage options: + + - `flwr ls --runs [<app>] [<federation>]`: Lists all runs. + - `flwr ls --run-id <run-id> [<app>] [<federation>]`: Displays details for a specific run. + + This command provides information including the run ID, FAB ID and version, run status, elapsed time, and timestamps for when the run was created, started running, and finished. + +- **Fuse SuperLink and SuperExec** ([#4358](https://github.com/adap/flower/pull/4358), [#4403](https://github.com/adap/flower/pull/4403), [#4406](https://github.com/adap/flower/pull/4406), [#4357](https://github.com/adap/flower/pull/4357), [#4359](https://github.com/adap/flower/pull/4359), [#4354](https://github.com/adap/flower/pull/4354), [#4229](https://github.com/adap/flower/pull/4229), [#4283](https://github.com/adap/flower/pull/4283), [#4352](https://github.com/adap/flower/pull/4352)) + + SuperExec has been integrated into SuperLink, enabling SuperLink to directly manage ServerApp processes (`flwr-serverapp`). The `flwr` CLI now targets SuperLink's Exec API. Additionally, SuperLink introduces two isolation modes for running ServerApps: `subprocess` (default) and `process`, which can be specified using the `--isolation {subprocess,process}` flag.
+ +- **Introduce `flwr-serverapp` command** ([#4394](https://github.com/adap/flower/pull/4394), [#4370](https://github.com/adap/flower/pull/4370), [#4367](https://github.com/adap/flower/pull/4367), [#4350](https://github.com/adap/flower/pull/4350), [#4364](https://github.com/adap/flower/pull/4364), [#4400](https://github.com/adap/flower/pull/4400), [#4363](https://github.com/adap/flower/pull/4363), [#4401](https://github.com/adap/flower/pull/4401), [#4388](https://github.com/adap/flower/pull/4388), [#4402](https://github.com/adap/flower/pull/4402)) + + The `flwr-serverapp` command has been introduced as a CLI entry point that runs a `ServerApp` process. This process communicates with SuperLink to load and execute the `ServerApp` object, enabling isolated execution and more flexible deployment. + +- **Improve simulation engine and introduce `flwr-simulation` command** ([#4433](https://github.com/adap/flower/pull/4433), [#4486](https://github.com/adap/flower/pull/4486), [#4448](https://github.com/adap/flower/pull/4448), [#4427](https://github.com/adap/flower/pull/4427), [#4438](https://github.com/adap/flower/pull/4438), [#4421](https://github.com/adap/flower/pull/4421), [#4430](https://github.com/adap/flower/pull/4430), [#4462](https://github.com/adap/flower/pull/4462)) + + The simulation engine has been significantly improved, resulting in dramatically faster simulations. Additionally, the `flwr-simulation` command has been introduced to enhance maintainability and provide a dedicated entry point for running simulations. + +- **Improve SuperLink message management** ([#4378](https://github.com/adap/flower/pull/4378), [#4369](https://github.com/adap/flower/pull/4369)) + + SuperLink now validates the destination node ID of instruction messages and checks the TTL (time-to-live) for reply messages. 
When pulling reply messages, an error reply will be generated and returned if the corresponding instruction message does not exist, has expired, or if the reply message exists but has expired. + +- **Introduce FedDebug baseline** ([#3783](https://github.com/adap/flower/pull/3783)) + + FedDebug is a framework that enhances debugging in Federated Learning by enabling interactive inspection of the training process and automatically identifying clients responsible for degrading the global model's performance—all without requiring testing data or labels. Learn more in the [FedDebug baseline documentation](https://flower.ai/docs/baselines/feddebug.html). + +- **Update documentation** ([#4511](https://github.com/adap/flower/pull/4511), [#4010](https://github.com/adap/flower/pull/4010), [#4396](https://github.com/adap/flower/pull/4396), [#4499](https://github.com/adap/flower/pull/4499), [#4269](https://github.com/adap/flower/pull/4269), [#3340](https://github.com/adap/flower/pull/3340), [#4482](https://github.com/adap/flower/pull/4482), [#4387](https://github.com/adap/flower/pull/4387), [#4342](https://github.com/adap/flower/pull/4342), [#4492](https://github.com/adap/flower/pull/4492), [#4474](https://github.com/adap/flower/pull/4474), [#4500](https://github.com/adap/flower/pull/4500), [#4514](https://github.com/adap/flower/pull/4514), [#4236](https://github.com/adap/flower/pull/4236), [#4112](https://github.com/adap/flower/pull/4112), [#3367](https://github.com/adap/flower/pull/3367), [#4501](https://github.com/adap/flower/pull/4501), [#4373](https://github.com/adap/flower/pull/4373), [#4409](https://github.com/adap/flower/pull/4409), [#4356](https://github.com/adap/flower/pull/4356), [#4520](https://github.com/adap/flower/pull/4520), [#4524](https://github.com/adap/flower/pull/4524), [#4525](https://github.com/adap/flower/pull/4525), [#4526](https://github.com/adap/flower/pull/4526), [#4527](https://github.com/adap/flower/pull/4527), 
[#4528](https://github.com/adap/flower/pull/4528), [#4545](https://github.com/adap/flower/pull/4545), [#4522](https://github.com/adap/flower/pull/4522), [#4534](https://github.com/adap/flower/pull/4534), [#4513](https://github.com/adap/flower/pull/4513), [#4529](https://github.com/adap/flower/pull/4529), [#4441](https://github.com/adap/flower/pull/4441), [#4530](https://github.com/adap/flower/pull/4530), [#4470](https://github.com/adap/flower/pull/4470), [#4553](https://github.com/adap/flower/pull/4553), [#4531](https://github.com/adap/flower/pull/4531), [#4554](https://github.com/adap/flower/pull/4554), [#4555](https://github.com/adap/flower/pull/4555), [#4552](https://github.com/adap/flower/pull/4552), [#4533](https://github.com/adap/flower/pull/4533)) + + Many documentation pages and tutorials have been updated to improve clarity, fix typos, incorporate user feedback, and stay aligned with the latest features in the framework. Key updates include adding a guide for designing stateful `ClientApp` objects, updating the comprehensive guide for setting up and running Flower's `Simulation Engine`, updating the XGBoost, scikit-learn, and JAX quickstart tutorials to use `flwr run`, updating DP guide, removing outdated pages, updating Docker docs, and marking legacy functions as deprecated. The [Secure Aggregation Protocols](https://flower.ai/docs/framework/contributor-ref-secure-aggregation-protocols.html) page has also been updated. 
+ +- **Update examples and templates** ([#4510](https://github.com/adap/flower/pull/4510), [#4368](https://github.com/adap/flower/pull/4368), [#4121](https://github.com/adap/flower/pull/4121), [#4329](https://github.com/adap/flower/pull/4329), [#4382](https://github.com/adap/flower/pull/4382), [#4248](https://github.com/adap/flower/pull/4248), [#4395](https://github.com/adap/flower/pull/4395), [#4386](https://github.com/adap/flower/pull/4386), [#4408](https://github.com/adap/flower/pull/4408)) + + Multiple examples and templates have been updated to enhance usability and correctness. The updates include the `30-minute-tutorial`, `quickstart-jax`, `quickstart-pytorch`, `advanced-tensorflow` examples, and the FlowerTune template. + +- **Improve Docker support** ([#4506](https://github.com/adap/flower/pull/4506), [#4424](https://github.com/adap/flower/pull/4424), [#4224](https://github.com/adap/flower/pull/4224), [#4413](https://github.com/adap/flower/pull/4413), [#4414](https://github.com/adap/flower/pull/4414), [#4336](https://github.com/adap/flower/pull/4336), [#4420](https://github.com/adap/flower/pull/4420), [#4407](https://github.com/adap/flower/pull/4407), [#4422](https://github.com/adap/flower/pull/4422), [#4532](https://github.com/adap/flower/pull/4532), [#4540](https://github.com/adap/flower/pull/4540)) + + Docker images and configurations have been updated, including updating Docker Compose files to version 1.13.0, refactoring the Docker build matrix for better maintainability, updating `docker/build-push-action` to 6.9.0, and improving Docker documentation. + +- **Allow app installation without internet access** ([#4479](https://github.com/adap/flower/pull/4479), [#4475](https://github.com/adap/flower/pull/4475)) + + The `flwr build` command now includes a wheel file in the FAB, enabling Flower app installation in environments without internet access via `flwr install`. 
+ +- **Improve `flwr log` command** ([#4391](https://github.com/adap/flower/pull/4391), [#4411](https://github.com/adap/flower/pull/4411), [#4390](https://github.com/adap/flower/pull/4390), [#4397](https://github.com/adap/flower/pull/4397)) + +- **Refactor SuperNode for better maintainability and efficiency** ([#4439](https://github.com/adap/flower/pull/4439), [#4348](https://github.com/adap/flower/pull/4348), [#4512](https://github.com/adap/flower/pull/4512), [#4485](https://github.com/adap/flower/pull/4485)) + +- **Support NumPy `2.0`** ([#4440](https://github.com/adap/flower/pull/4440)) + +- **Update infrastructure and CI/CD** ([#4466](https://github.com/adap/flower/pull/4466), [#4419](https://github.com/adap/flower/pull/4419), [#4338](https://github.com/adap/flower/pull/4338), [#4334](https://github.com/adap/flower/pull/4334), [#4456](https://github.com/adap/flower/pull/4456), [#4446](https://github.com/adap/flower/pull/4446), [#4415](https://github.com/adap/flower/pull/4415)) + +- **Bugfixes** ([#4404](https://github.com/adap/flower/pull/4404), [#4518](https://github.com/adap/flower/pull/4518), [#4452](https://github.com/adap/flower/pull/4452), [#4376](https://github.com/adap/flower/pull/4376), [#4493](https://github.com/adap/flower/pull/4493), [#4436](https://github.com/adap/flower/pull/4436), [#4410](https://github.com/adap/flower/pull/4410), [#4442](https://github.com/adap/flower/pull/4442), [#4375](https://github.com/adap/flower/pull/4375), [#4515](https://github.com/adap/flower/pull/4515)) + +- **General improvements** ([#4454](https://github.com/adap/flower/pull/4454), [#4365](https://github.com/adap/flower/pull/4365), [#4423](https://github.com/adap/flower/pull/4423), [#4516](https://github.com/adap/flower/pull/4516), [#4509](https://github.com/adap/flower/pull/4509), [#4498](https://github.com/adap/flower/pull/4498), [#4371](https://github.com/adap/flower/pull/4371), [#4449](https://github.com/adap/flower/pull/4449), 
[#4488](https://github.com/adap/flower/pull/4488), [#4478](https://github.com/adap/flower/pull/4478), [#4392](https://github.com/adap/flower/pull/4392), [#4483](https://github.com/adap/flower/pull/4483), [#4517](https://github.com/adap/flower/pull/4517), [#4330](https://github.com/adap/flower/pull/4330), [#4458](https://github.com/adap/flower/pull/4458), [#4347](https://github.com/adap/flower/pull/4347), [#4429](https://github.com/adap/flower/pull/4429), [#4463](https://github.com/adap/flower/pull/4463), [#4496](https://github.com/adap/flower/pull/4496), [#4508](https://github.com/adap/flower/pull/4508), [#4444](https://github.com/adap/flower/pull/4444), [#4417](https://github.com/adap/flower/pull/4417), [#4504](https://github.com/adap/flower/pull/4504), [#4418](https://github.com/adap/flower/pull/4418), [#4480](https://github.com/adap/flower/pull/4480), [#4455](https://github.com/adap/flower/pull/4455), [#4468](https://github.com/adap/flower/pull/4468), [#4385](https://github.com/adap/flower/pull/4385), [#4487](https://github.com/adap/flower/pull/4487), [#4393](https://github.com/adap/flower/pull/4393), [#4489](https://github.com/adap/flower/pull/4489), [#4389](https://github.com/adap/flower/pull/4389), [#4507](https://github.com/adap/flower/pull/4507), [#4469](https://github.com/adap/flower/pull/4469), [#4340](https://github.com/adap/flower/pull/4340), [#4353](https://github.com/adap/flower/pull/4353), [#4494](https://github.com/adap/flower/pull/4494), [#4461](https://github.com/adap/flower/pull/4461), [#4362](https://github.com/adap/flower/pull/4362), [#4473](https://github.com/adap/flower/pull/4473), [#4405](https://github.com/adap/flower/pull/4405), [#4416](https://github.com/adap/flower/pull/4416), [#4453](https://github.com/adap/flower/pull/4453), [#4491](https://github.com/adap/flower/pull/4491), [#4539](https://github.com/adap/flower/pull/4539), [#4542](https://github.com/adap/flower/pull/4542), [#4538](https://github.com/adap/flower/pull/4538), 
[#4543](https://github.com/adap/flower/pull/4543), [#4541](https://github.com/adap/flower/pull/4541), [#4550](https://github.com/adap/flower/pull/4550), [#4481](https://github.com/adap/flower/pull/4481)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Deprecations + +- **Deprecate Python 3.9** + + Flower is deprecating support for Python 3.9 as several of its dependencies are phasing out compatibility with this version. While no immediate changes have been made, users are encouraged to plan for upgrading to a supported Python version. + +### Incompatible changes + +- **Remove `flower-superexec` command** ([#4351](https://github.com/adap/flower/pull/4351)) + + The `flower-superexec` command, previously used to launch SuperExec, is no longer functional as SuperExec has been merged into SuperLink. Starting an additional SuperExec is no longer necessary when SuperLink is initiated. + +- **Remove `flower-server-app` command** ([#4490](https://github.com/adap/flower/pull/4490)) + + The `flower-server-app` command has been removed. To start a Flower app, please use the `flwr run` command instead. + +- **Remove `app` argument from `flower-supernode` command** ([#4497](https://github.com/adap/flower/pull/4497)) + + The usage of `flower-supernode <app>` has been removed. SuperNode will now load the FAB delivered by SuperLink, and it is no longer possible to directly specify an app directory. + +- **Remove support for non-app simulations** ([#4431](https://github.com/adap/flower/pull/4431)) + + The simulation engine (via `flower-simulation`) now exclusively supports passing an app. + +- **Rename CLI arguments for `flower-superlink` command** ([#4412](https://github.com/adap/flower/pull/4412)) + + The `--driver-api-address` argument has been renamed to `--serverappio-api-address` in the `flower-superlink` command to reflect the renaming of the `Driver` service to the `ServerAppIo` service.
+ +- **Rename CLI arguments for `flwr-serverapp` and `flwr-clientapp` commands** ([#4495](https://github.com/adap/flower/pull/4495)) + + The CLI arguments have been renamed for clarity and consistency. Specifically, `--superlink` for `flwr-serverapp` is now `--serverappio-api-address`, and `--supernode` for `flwr-clientapp` is now `--clientappio-api-address`. + ## v1.12.0 (2024-10-14) ### Thanks to our contributors diff --git a/doc/source/tutorial-quickstart-android.rst b/doc/source/tutorial-quickstart-android.rst index f2691203078c..582befd716c8 100644 --- a/doc/source/tutorial-quickstart-android.rst +++ b/doc/source/tutorial-quickstart-android.rst @@ -6,6 +6,14 @@ Quickstart Android .. meta:: :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. +.. warning:: + + The experimental Flower Android SDK is not compatible with the latest version of + Flower. Android support is currently being reworked and will be released in 2025. + + This quickstart tutorial is kept for historical purposes and will be updated once + the new Android SDK is released. + Let's build a federated learning system using TFLite and Flower on Android! Please refer to the `full code example diff --git a/doc/source/tutorial-quickstart-ios.rst b/doc/source/tutorial-quickstart-ios.rst index 8a9250f8dfb0..3d4f95b95763 100644 --- a/doc/source/tutorial-quickstart-ios.rst +++ b/doc/source/tutorial-quickstart-ios.rst @@ -6,6 +6,14 @@ Quickstart iOS .. meta:: :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. +.. warning:: + + The experimental Flower iOS SDK is not compatible with the latest version of Flower. + iOS support is currently being reworked and will be released in 2025. + + This quickstart tutorial is kept for historical purposes and will be updated once + the new iOS SDK is released. 
+ In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index fe15227fdf11..3373e87adc17 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -3,14 +3,8 @@ Quickstart XGBoost ================== -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. - -.. youtube:: AY1vpXUpesc - :width: 100% - -Federated XGBoost ------------------ +XGBoost +------- EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries @@ -21,37 +15,38 @@ concurrently, unlike the sequential approach taken by GBDT. Often, for tabular data on medium-sized datasets with fewer than 10k training examples, XGBoost surpasses the results of deep learning techniques. -Why federated XGBoost? +Why Federated XGBoost? ~~~~~~~~~~~~~~~~~~~~~~ -Indeed, as the demand for data privacy and decentralized learning grows, there's an -increasing requirement to implement federated XGBoost systems for specialised -applications, like survival analysis and financial fraud detection. +As the demand for data privacy and decentralized learning grows, there's an increasing +requirement to implement federated XGBoost systems for specialised applications, like +survival analysis and financial fraud detection. Federated learning ensures that raw data remains on the local device, making it an -attractive approach for sensitive domains where data security and privacy are paramount. -Given the robustness and efficiency of XGBoost, combining it with federated learning -offers a promising solution for these specific challenges. 
- -In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset -using Flower and ``xgboost`` package. We use a simple example (`full code -xgboost-quickstart -`_) with two -*clients* and one *server* to demonstrate how federated XGBoost works, and then we dive -into a more complex example (`full code xgboost-comprehensive -`_) to run -various experiments. +attractive approach for sensitive domains where data privacy is paramount. Given the +robustness and efficiency of XGBoost, combining it with federated learning offers a +promising solution for these specific challenges. Environment Setup ----------------- -First of all, it is recommended to create a virtual environment and run everything -within a :doc:`virtualenv `. +In this tutorial, we learn how to train a federated XGBoost model on the HIGGS dataset +using Flower and the ``xgboost`` package to perform a binary classification task. We use +a simple example (`full code xgboost-quickstart +`_) to demonstrate +how federated XGBoost works, and then we dive into a more complex comprehensive example +(`full code xgboost-comprehensive +`_) to run +various experiments. + +It is recommended to create a virtual environment and run everything within a +:doc:`virtualenv `. We first need to install Flower and Flower Datasets. You can do this by running : .. code-block:: shell + # In a new Python environment $ pip install flwr flwr-datasets Since we want to use ``xgboost`` package to build up XGBoost trees, let's go ahead and @@ -61,79 +56,59 @@ install ``xgboost``: $ pip install xgboost -Flower Client -------------- +The Configurations +~~~~~~~~~~~~~~~~~~ -*Clients* are responsible for generating individual weight-updates for the model based -on their local datasets. Now that we have all our dependencies installed, let's run a -simple distributed training with two clients and one server. 
+We define all required configurations / hyper-parameters inside the ``pyproject.toml`` +file: -In a file called ``client.py``, import xgboost, Flower, Flower Datasets and other -related functions: .. code-block:: toml -.. code-block:: python + [tool.flwr.app.config] + # ServerApp + num-server-rounds = 3 + fraction-fit = 0.1 + fraction-evaluate = 0.1 - import argparse - from typing import Union - from logging import INFO - from datasets import Dataset, DatasetDict - import xgboost as xgb + # ClientApp + local-epochs = 1 + params.objective = "binary:logistic" + params.eta = 0.1 # Learning rate + params.max-depth = 8 + params.eval-metric = "auc" + params.nthread = 16 + params.num-parallel-tree = 1 + params.subsample = 1 + params.tree-method = "hist" - import flwr as fl - from flwr_datasets import FederatedDataset - from flwr.common.logger import log - from flwr.common import ( - Code, - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - GetParametersIns, - GetParametersRes, - Parameters, - Status, - ) - from flwr_datasets.partitioner import IidPartitioner +The ``local-epochs`` represents the number of iterations for local tree boost. We use +CPU for the training by default. One can assign it to a GPU by setting ``tree_method`` +to ``gpu_hist``. We use AUC as the evaluation metric. -Dataset partition and hyper-parameter selection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The Data +~~~~~~~~ -Prior to local training, we require loading the HIGGS dataset from Flower Datasets and -conduct data partitioning for FL: +This tutorial uses `Flower Datasets `_ to easily +download and partition the `HIGGS` dataset. .. code-block:: python - # Load (HIGGS) dataset and conduct partitioning - # We use a small subset (num_partitions=30) of the dataset for demonstration to speed up the data loading process. - partitioner = IidPartitioner(num_partitions=30) + # Load (HIGGS) dataset and partition.
+ # We use a small subset (num_partitions=20) of the dataset for demonstration to speed up the data loading process. + partitioner = IidPartitioner(num_partitions=20) fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) - # Load the partition for this `node_id` - partition = fds.load_partition(partition_id=args.partition_id, split="train") + # Load the partition for this `partition_id` + partition = fds.load_partition(partition_id, split="train") partition.set_format("numpy") -In this example, we split the dataset into 30 partitions with uniform distribution -(``IidPartitioner(num_partitions=30)``). Then, we load the partition for the given -client based on ``partition_id``: +In this example, we split the dataset into 20 partitions with uniform distribution +(`IidPartitioner +`_). +Then, we load the partition for the given client based on ``partition_id``. -.. code-block:: python - - # We first define arguments parser for user to specify the client/partition ID. - parser = argparse.ArgumentParser() - parser.add_argument( - "--partition-id", - default=0, - type=int, - help="Partition ID used for the current client.", - ) - args = parser.parse_args() - - # Load the partition for this `partition_id`. - partition = fds.load_partition(idx=args.partition_id, split="train") - partition.set_format("numpy") - -After that, we do train/test splitting on the given partition (client's local data), and -transform data format for ``xgboost`` package. +Subsequently, we train/test split using the given partition (client's local data), and +reformat data to DMatrix for the ``xgboost`` package. .. code-block:: python @@ -151,8 +126,7 @@ as below: .. 
code-block:: python - # Define data partitioning related functions - def train_test_split(partition: Dataset, test_fraction: float, seed: int): + def train_test_split(partition, test_fraction, seed): """Split the data into train and validation set given split rate.""" train_test = partition.train_test_split(test_size=test_fraction, seed=seed) partition_train = train_test["train"] @@ -164,42 +138,25 @@ as below: return partition_train, partition_test, num_train, num_test - def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: + def transform_dataset_to_dmatrix(data): """Transform dataset to DMatrix format for xgboost.""" x = data["inputs"] y = data["label"] new_data = xgb.DMatrix(x, label=y) return new_data -Finally, we define the hyper-parameters used for XGBoost training. +The ClientApp +~~~~~~~~~~~~~ -.. code-block:: python - - num_local_round = 1 - params = { - "objective": "binary:logistic", - "eta": 0.1, # lr - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", - } - -The ``num_local_round`` represents the number of iterations for local tree boost. We use -CPU for the training in default. One can shift it to GPU by setting ``tree_method`` to -``gpu_hist``. We use AUC as evaluation metric. - -Flower client definition for XGBoost -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After loading the dataset we define the Flower client. We follow the general rule to -define ``XgbClient`` class inherited from ``fl.client.Client``. +*Clients* are responsible for generating individual weight-updates for the model based +on their local datasets. Let's first see how we define Flower client for XGBoost. We +follow the general rule to define ``FlowerClient`` class inherited from +``fl.client.Client``. .. 
code-block:: python - class XgbClient(fl.client.Client): + # Define Flower Client and client_fn + class FlowerClient(Client): def __init__( self, train_dmatrix, @@ -216,27 +173,10 @@ define ``XgbClient`` class inherited from ``fl.client.Client``. self.num_local_round = num_local_round self.params = params -All required parameters defined above are passed to ``XgbClient``'s constructor. - -Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods insides -``XgbClient`` class as follows. - -.. code-block:: python - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) +All required parameters defined above are passed to ``FlowerClient``'s constructor. -Unlike neural network training, XGBoost trees are not started from a specified random -weights. In this case, we do not use ``get_parameters`` and ``set_parameters`` to -initialise model parameters for XGBoost. As a result, let's return an empty tensor in -``get_parameters`` when it is called by the server at the first round. +Then, we override ``fit`` and ``evaluate`` methods inside the ``FlowerClient`` class as +follows. .. code-block:: python @@ -252,8 +192,7 @@ initialise model parameters for XGBoost. As a result, let's return an empty tens ) else: bst = xgb.Booster(params=self.params) - for item in ins.parameters.tensors: - global_model = bytearray(item) + global_model = bytearray(ins.parameters.tensors[0]) # Load global model into booster bst.load_model(global_model) @@ -278,7 +217,7 @@ initialise model parameters for XGBoost. As a result, let's return an empty tens In ``fit``, at the first round, we call ``xgb.train()`` to build up the first set of trees.
From the second round, we load the global model sent from server to new build Booster object, and then update model weights on local training data with function -``local_boost`` as follows: +``_local_boost`` as follows: .. code-block:: python @@ -303,8 +242,7 @@ training, the last ``N=num_local_round`` trees will be extracted to send to the def evaluate(self, ins: EvaluateIns) -> EvaluateRes: # Load global model bst = xgb.Booster(params=self.params) - for para in ins.parameters.tensors: - para_b = bytearray(para) + para_b = bytearray(ins.parameters.tensors[0]) bst.load_model(para_b) # Run evaluation @@ -314,9 +252,6 @@ training, the last ``N=num_local_round`` trees will be extracted to send to the ) auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - return EvaluateRes( status=Status( code=Code.OK, @@ -330,54 +265,26 @@ training, the last ``N=num_local_round`` trees will be extracted to send to the In ``evaluate``, after loading the global model, we call ``bst.eval_set`` function to conduct evaluation on valid set. The AUC value will be returned. -Now, we can create an instance of our class ``XgbClient`` and add one line to actually -run this client: - -.. code-block:: python - - fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ).to_client(), - ) - -That's it for the client. We only have to implement ``Client`` and call -``fl.client.start_client()``. The string ``"[::]:8080"`` tells the client which server -to connect to. In our case we can run the server and the client on the same machine, -therefore we use ``"[::]:8080"``. If we run a truly federated workload with the server -and clients running on different machines, all that needs to change is the -``server_address`` we point the client at. 
- -Flower Server ------------- +The ServerApp +~~~~~~~~~~~~~ -These updates are then sent to the *server* which will aggregate them to produce a -better model. Finally, the *server* sends this improved version of the model back to -each *client* to finish a complete FL round. +After the local training on clients, clients' model updates are sent to the *server*, +which aggregates them to produce a better model. Finally, the *server* sends this +improved model version back to each *client* to complete a federated round. -In a file named ``server.py``, import Flower and FedXgbBagging from -``flwr.server.strategy``. - -We first define a strategy for XGBoost bagging aggregation. +In the file named ``server_app.py``, we define a strategy for XGBoost bagging +aggregation: .. code-block:: python # Define strategy strategy = FedXgbBagging( - fraction_fit=1.0, - min_fit_clients=2, - min_available_clients=2, - min_evaluate_clients=2, - fraction_evaluate=1.0, + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, on_evaluate_config_fn=config_func, on_fit_config_fn=config_func, + initial_parameters=parameters, ) @@ -398,24 +305,12 @@ We first define a strategy for XGBoost bagging aggregation. } return config -We use two clients for this example. An ``evaluate_metrics_aggregation`` function is -defined to collect and wighted average the AUC values from clients. The ``config_func`` -function is to return the current FL round number to client's ``fit()`` and -``evaluate()`` methods. - -Then, we start the server: +An ``evaluate_metrics_aggregation`` function is defined to collect and weighted average +the AUC values from clients. The ``config_func`` function is to return the current FL +round number to client's ``fit()`` and ``evaluate()`` methods. -.. 
code-block:: python - - # Start Flower server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=5), - strategy=strategy, - ) - -Tree-based bagging aggregation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Tree-based Bagging Aggregation +++++++++++++++++++++++++++++++ You must be curious about how bagging aggregation works. Let's look into the details. @@ -581,107 +476,89 @@ for the current and previous model by calling ``_get_tree_nums``. Then, the fetc information will be aggregated. After that, the trees (containing model weights) are aggregated to generate a new tree model. -After traversal of all clients' models, a new global model is generated, followed by the -serialisation, and sending back to each client. +After traversal of all clients' models, a new global model is generated, followed by +serialisation, and sending the global model back to each client. Launch Federated XGBoost! ------------------------- -With both client and server ready, we can now run everything and see federated learning -in action. FL systems usually have a server and multiple clients. We therefore have to -start the server first: - -.. code-block:: shell - - $ python3 server.py - -Once the server is running we can start the clients in different terminals. Open a new -terminal and start the first client: - -.. code-block:: shell - - $ python3 client.py --partition-id=0 - -Open another terminal and start the second client: +To run the project, do: .. code-block:: shell - $ python3 client.py --partition-id=1 + # Run with default arguments + $ flwr run . -Each client will have its own dataset. You should now see how the training does in the -very first terminal (the one that started the server): +With default arguments you will see an output like this one: .. 
code-block:: shell - INFO : Starting Flower server, config: num_rounds=5, no round_timeout - INFO : Flower ECE: gRPC server running (5 rounds), SSL is disabled + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : INFO : [INIT] - INFO : Requesting initial parameters from one random client - INFO : Received initial parameters from one random client - INFO : Evaluating initial global parameters + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) INFO : INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : configure_fit: strategy sampled 2 clients (out of 20) INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : configure_evaluate: strategy sampled 2 clients (out of 20) INFO : aggregate_evaluate: received 2 results and 0 failures INFO : INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : configure_fit: strategy sampled 2 clients (out of 20) INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : configure_evaluate: strategy sampled 2 clients (out of 20) INFO : aggregate_evaluate: received 2 results and 0 failures INFO : INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 2 clients (out of 2) - INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 2) - INFO : aggregate_evaluate: received 2 results and 0 failures - INFO : - INFO : [ROUND 4] - INFO : configure_fit: strategy sampled 2 clients (out of 2) - INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 2) - INFO : aggregate_evaluate: received 2 results and 0 failures - 
INFO : - INFO : [ROUND 5] - INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : configure_fit: strategy sampled 2 clients (out of 20) INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : configure_evaluate: strategy sampled 2 clients (out of 20) INFO : aggregate_evaluate: received 2 results and 0 failures INFO : INFO : [SUMMARY] - INFO : Run finished 5 round(s) in 1.67s + INFO : Run finished 3 round(s) in 145.42s INFO : History (loss, distributed): INFO : round 1: 0 INFO : round 2: 0 INFO : round 3: 0 - INFO : round 4: 0 - INFO : round 5: 0 INFO : History (metrics, distributed, evaluate): - INFO : {'AUC': [(1, 0.76755), (2, 0.775), (3, 0.77935), (4, 0.7836), (5, 0.7872)]} + INFO : {'AUC': [(1, 0.7664), (2, 0.77595), (3, 0.7826)]} + INFO : Congratulations! You've successfully built and run your first federated XGBoost system. -The AUC values can be checked in ``metrics_distributed``. One can see that the average -AUC increases over FL rounds. +The AUC values can be checked in ``History (metrics, distributed, evaluate)``. One can +see that the average AUC increases over FL rounds. + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 params.eta=0.05" + +.. note:: -The full `source code -`_ for this -example can be found in ``examples/xgboost-quickstart``. + Check the full `source code + `_ for this + example in ``examples/xgboost-quickstart`` in the Flower GitHub repository. Comprehensive Federated XGBoost ------------------------------- -Now that you have known how federated XGBoost work with Flower, it's time to run some -more comprehensive experiments by customising the experimental settings. 
In the +Now that you know how federated XGBoost works with Flower, it's time to run some more +comprehensive experiments by customising the experimental settings. In the xgboost-comprehensive example (`full code `_), we provide more options to define various experimental setups, including aggregation strategies, -data partitioning and centralised/distributed evaluation. We also support :doc:`Flower -simulation ` making it easy to simulate large client cohorts in -a resource-aware manner. Let's take a look! +data partitioning and centralised / distributed evaluation. Let's take a look! -Cyclic training +Cyclic Training ~~~~~~~~~~~~~~~ In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL @@ -690,7 +567,7 @@ one single client participating in the training per round in the cyclic training scenario. The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. -To do this, we first customise a ``ClientManager`` in ``server_utils.py``: +To do this, we first customise a ``ClientManager`` in ``server_app.py``: .. code-block:: python @@ -733,7 +610,7 @@ To do this, we first customise a ``ClientManager`` in ``server_utils.py``: The customised ``ClientManager`` samples all available clients in each FL round based on the order of connection to the server. Then, we define a new strategy ``FedXgbCyclic`` in ``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially select only one -client in given round and pass the received model to next client. +client in given round and pass the received model to the next client. .. 
code-block:: python @@ -804,48 +681,17 @@ clients to be sequentially selected given FL round: # Return client/config pairs return [(client, fit_ins) for client in sampled_clients] - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - """Configure the next round of evaluation.""" - # Do not configure federated evaluation if fraction eval is 0. - if self.fraction_evaluate == 0.0: - return [] - - # Parameters and config - config = {} - if self.on_evaluate_config_fn is not None: - # Custom evaluation config function provided - config = self.on_evaluate_config_fn(server_round) - evaluate_ins = EvaluateIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_evaluation_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, evaluate_ins) for client in sampled_clients] - -Customised data partitioning +Customised Data Partitioning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In ``dataset.py``, we have a function ``instantiate_partitioner`` to instantiate the -data partitioner based on the given ``num_partitions`` and ``partitioner_type``. +In ``task.py``, we use the ``instantiate_fds`` function to instantiate Flower Datasets +and the data partitioner based on the given ``partitioner_type`` and ``num_partitions``. Currently, we provide four supported partitioner type to simulate the uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). .. 
code-block:: python + from flwr_datasets import FederatedDataset from flwr_datasets.partitioner import ( IidPartitioner, LinearPartitioner, @@ -861,22 +707,29 @@ uniformity/non-uniformity in data quantity (uniform, linear, square, exponential } - def instantiate_partitioner(partitioner_type: str, num_partitions: int): - """Initialise partitioner based on selected partitioner type and number of - partitions.""" - partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( - num_partitions=num_partitions - ) - return partitioner + def instantiate_fds(partitioner_type, num_partitions): + """Initialize FederatedDataset.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( + num_partitions=num_partitions + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + preprocessor=resplit, + ) + return fds -Customised centralised/distributed evaluation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Customised Centralised / Distributed Evaluation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To facilitate centralised evaluation, we define a function in ``server_utils.py``: +To facilitate centralised evaluation, we define a function in ``server_app.py``: .. code-block:: python - def get_evaluate_fn(test_data): + def get_evaluate_fn(test_data, params): """Return a function for centralised evaluation.""" def evaluate_fn( @@ -898,446 +751,95 @@ To facilitate centralised evaluation, we define a function in ``server_utils.py` iteration=bst.num_boosted_rounds() - 1, ) auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - log(INFO, f"AUC = {auc} at round {server_round}") return 0, {"AUC": auc} return evaluate_fn -This function returns a evaluation function which instantiates a ``Booster`` object and -loads the global model weights to it. 
The evaluation is conducted by calling +This function returns an evaluation function, which instantiates a ``Booster`` object +and loads the global model weights to it. The evaluation is conducted by calling ``eval_set()`` method, and the tested AUC value is reported. As for distributed evaluation on the clients, it's same as the quick-start example by overriding the ``evaluate()`` method insides the ``XgbClient`` class in -``client_utils.py``. - -Flower simulation -~~~~~~~~~~~~~~~~~ - -We also provide an example code (``sim.py``) to use the simulation capabilities of -Flower to simulate federated XGBoost training on either a single machine or a cluster of -machines. - -.. code-block:: python - - from logging import INFO - import xgboost as xgb - from tqdm import tqdm - - import flwr as fl - from flwr_datasets import FederatedDataset - from flwr.common.logger import log - from flwr.server.strategy import FedXgbBagging, FedXgbCyclic - - from dataset import ( - instantiate_partitioner, - train_test_split, - transform_dataset_to_dmatrix, - separate_xy, - resplit, - ) - from utils import ( - sim_args_parser, - NUM_LOCAL_ROUND, - BST_PARAMS, - ) - from server_utils import ( - eval_config, - fit_config, - evaluate_metrics_aggregation, - get_evaluate_fn, - CyclicClientManager, - ) - from client_utils import XgbClient - -After importing all required packages, we define a ``main()`` function to perform the -simulation process: - -.. 
code-block:: python - - def main(): - # Parse arguments for experimental settings - args = sim_args_parser() - - # Load (HIGGS) dataset and conduct partitioning - partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.pool_size - ) - fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - resplitter=resplit, - ) - - # Load centralised test set - if args.centralised_eval or args.centralised_eval_client: - log(INFO, "Loading centralised test set...") - test_data = fds.load_split("test") - test_data.set_format("numpy") - num_test = test_data.shape[0] - test_dmatrix = transform_dataset_to_dmatrix(test_data) - - # Load partitions and reformat data to DMatrix for xgboost - log(INFO, "Loading client local partitions...") - train_data_list = [] - valid_data_list = [] - - # Load and process all client partitions. This upfront cost is amortized soon - # after the simulation begins since clients wont need to preprocess their partition. - for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): - # Extract partition for client with node_id - partition = fds.load_partition(node_id=node_id, split="train") - partition.set_format("numpy") - - if args.centralised_eval_client: - # Use centralised test set for evaluation - train_data = partition - num_train = train_data.shape[0] - x_test, y_test = separate_xy(test_data) - valid_data_list.append(((x_test, y_test), num_test)) - else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - x_valid, y_valid = separate_xy(valid_data) - valid_data_list.append(((x_valid, y_valid), num_val)) - - x_train, y_train = separate_xy(train_data) - train_data_list.append(((x_train, y_train), num_train)) - -We first load the dataset and perform data partitioning, and the pre-processed data is -stored in a ``list``. 
After the simulation begins, the clients won't need to pre-process -their partitions again. - -Then, we define the strategies and other hyper-parameters: - -.. code-block:: python - - # Define strategy - if args.train_method == "bagging": - # Bagging training - strategy = FedXgbBagging( - evaluate_function=( - get_evaluate_fn(test_dmatrix) if args.centralised_eval else None - ), - fraction_fit=(float(args.num_clients_per_round) / args.pool_size), - min_fit_clients=args.num_clients_per_round, - min_available_clients=args.pool_size, - min_evaluate_clients=( - args.num_evaluate_clients if not args.centralised_eval else 0 - ), - fraction_evaluate=1.0 if not args.centralised_eval else 0.0, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=( - evaluate_metrics_aggregation if not args.centralised_eval else None - ), - ) - else: - # Cyclic training - strategy = FedXgbCyclic( - fraction_fit=1.0, - min_available_clients=args.pool_size, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - ) - - # Resources to be assigned to each virtual client - # In this example we use CPU by default - client_resources = { - "num_cpus": args.num_cpus_per_client, - "num_gpus": 0.0, - } - - # Hyper-parameters for xgboost training - num_local_round = NUM_LOCAL_ROUND - params = BST_PARAMS - - # Setup learning rate - if args.train_method == "bagging" and args.scaled_lr: - new_lr = params["eta"] / args.pool_size - params.update({"eta": new_lr}) - -After that, we start the simulation by calling ``fl.simulation.start_simulation``: - -.. 
code-block:: python - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn( - train_data_list, - valid_data_list, - args.train_method, - params, - num_local_round, - ), - num_clients=args.pool_size, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=args.num_rounds), - strategy=strategy, - client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, - ) - -One of key parameters for ``start_simulation`` is ``client_fn`` which returns a function -to construct a client. We define it as follows: - -.. code-block:: python - - def get_client_fn( - train_data_list, valid_data_list, train_method, params, num_local_round - ): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - x_train, y_train = train_data_list[int(cid)][0] - x_valid, y_valid = valid_data_list[int(cid)][0] - - # Reformat data to DMatrix - train_dmatrix = xgb.DMatrix(x_train, label=y_train) - valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) - - # Fetch the number of examples - num_train = train_data_list[int(cid)][1] - num_val = valid_data_list[int(cid)][1] - - # Create and return client - return XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ) - - return client_fn - -Arguments parser -~~~~~~~~~~~~~~~~ - -In ``utils.py``, we define the arguments parsers for clients, server and simulation, -allowing users to specify different experimental settings. Let's first see the sever -side: - -.. 
code-block:: python - - import argparse - - - def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - - args = parser.parse_args() - return args - -This allows user to specify training strategies / the number of total clients / FL -rounds / participating clients / clients for evaluation, and evaluation fashion. Note -that with ``--centralised-eval``, the sever will do centralised evaluation and all +``client_app.py``. + +Arguments Explainer +~~~~~~~~~~~~~~~~~~~ + +We define all hyper-parameters under ``[tool.flwr.app.config]`` entry in +``pyproject.toml``: + +.. 
code-block:: toml + + [tool.flwr.app.config] + # ServerApp + train-method = "bagging" # Choose from [bagging, cyclic] + num-server-rounds = 3 + fraction-fit = 1.0 + fraction-evaluate = 1.0 + centralised-eval = false + + # ClientApp + partitioner-type = "uniform" # Choose from [uniform, linear, square, exponential] + test-fraction = 0.2 + seed = 42 + centralised-eval-client = false + local-epochs = 1 + scaled-lr = false + params.objective = "binary:logistic" + params.eta = 0.1 # Learning rate + params.max-depth = 8 + params.eval-metric = "auc" + params.nthread = 16 + params.num-parallel-tree = 1 + params.subsample = 1 + params.tree-method = "hist" + +On the server side, we allow the user to specify training strategies / FL rounds / +participating clients / clients for evaluation, and evaluation fashion. Note that with +``centralised-eval = true``, the server will do centralised evaluation and all functionalities for client evaluation will be disabled. -Then, the argument parser on client side: - -.. code-block:: python - - def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." - ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--node-id", - default=0, - type=int, - help="Node ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." 
- ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args - -This defines various options for client data partitioning. Besides, clients also have an -option to conduct evaluation on centralised test set by setting ``--centralised-eval``, -as well as an option to perform scaled learning rate based on the number of clients by -setting ``--scaled-lr``. - -We also have an argument parser for simulation: +On the client side, we can define various options for client data partitioning. Besides, +clients also have an option to conduct evaluation on centralised test set by setting +``centralised-eval = true``, as well as an option to perform scaled learning rate based +on the number of clients by setting ``scaled-lr = true``. -.. code-block:: python - - def sim_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - - # Server side - parser.add_argument( - "--pool-size", default=5, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=30, type=int, help="Number of FL rounds." 
- ) - parser.add_argument( - "--num-clients-per-round", - default=5, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=5, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - parser.add_argument( - "--num-cpus-per-client", - default=2, - type=int, - help="Number of CPUs used for per client.", - ) - - # Client side - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval-client", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) +Example Commands +~~~~~~~~~~~~~~~~ - args = parser.parse_args() - return args +To run bagging aggregation for 5 rounds evaluated on centralised test set: -This integrates all arguments for both client and server sides. +.. code-block:: shell -Example commands -~~~~~~~~~~~~~~~~ + flwr run . --run-config "train-method='bagging' num-server-rounds=5 centralised-eval=true" -To run a centralised evaluated experiment with bagging strategy on 5 clients with -exponential distribution for 50 rounds, we first start the server as below: +To run cyclic training with linear partitioner type evaluated on centralised test set: .. 
code-block:: shell - $ python3 server.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --centralised-eval + flwr run . --run-config "train-method='cyclic' partitioner-type='linear' + centralised-eval-client=true" -Then, on each client terminal, we start the clients: +.. note:: -.. code-block:: shell + The full `code + `_ for + this comprehensive example can be found in ``examples/xgboost-comprehensive`` in the + Flower GitHub repository. - $ python3 clients.py --train-method=bagging --num-partitions=5 --partitioner-type=exponential --node-id=NODE_ID +Video Tutorial +-------------- -To run the same experiment with Flower simulation: +.. note:: -.. code-block:: shell + The video shown below shows how to set up an XGBoost + Flower project using our + previously recommended APIs. A new video tutorial will be released that shows the + new APIs (as the content above does). - $ python3 sim.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --partitioner-type=exponential --centralised-eval +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. -The full `code -`_ for this -comprehensive example can be found in ``examples/xgboost-comprehensive``. +.. youtube:: AY1vpXUpesc + :width: 100% diff --git a/doc/source/tutorial-series-what-is-federated-learning.ipynb b/doc/source/tutorial-series-what-is-federated-learning.ipynb old mode 100755 new mode 100644 index 7d77d1770457..a94dc2910959 --- a/doc/source/tutorial-series-what-is-federated-learning.ipynb +++ b/doc/source/tutorial-series-what-is-federated-learning.ipynb @@ -11,7 +11,7 @@ "\n", "In this tutorial, you will learn what federated learning is, build your first system in Flower, and gradually extend it. 
If you work through all parts of the tutorial, you will be able to build advanced federated learning systems that approach the current state of the art in the field.\n", "\n", - "🧑‍🏫 This tutorial starts at zero and expects no familiarity with federated learning. Only a basic understanding of data science and Python programming is assumed.\n", + "🧑‍🏫 This tutorial starts from zero and expects no familiarity with federated learning. Only a basic understanding of data science and Python programming is assumed.\n", "\n", "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the open-source Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", "\n", @@ -23,9 +23,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Classic machine learning\n", + "## Classical Machine Learning\n", "\n", - "Before we begin to discuss federated learning, let us quickly recap how most machine learning works today.\n", + "Before we begin discussing federated learning, let us quickly recap how most machine learning works today.\n", "\n", "In machine learning, we have a model, and we have data. The model could be a neural network (as depicted here), or something else, like classical linear regression.\n", "\n", @@ -39,9 +39,9 @@ " \"Train\n", "\n", "\n", - "Now, in practice, the training data we work with doesn't originate on the machine we train the model on. It gets created somewhere else.\n", + "In practice, the training data we work with doesn't originate on the machine we train the model on. \n", "\n", - "It originates on a smartphone by the user interacting with an app, a car collecting sensor data, a laptop receiving input via the keyboard, or a smart speaker listening to someone trying to sing a song.\n", + "This data gets created \"somewhere else\". 
For instance, the data can originate on a smartphone by the user interacting with an app, a car collecting sensor data, a laptop receiving input via the keyboard, or a smart speaker listening to someone trying to sing a song.\n", "\n", "

\n", " \"Data\n", @@ -53,7 +53,7 @@ " \"Data\n", "
\n", "\n", - "So to use machine learning, or any kind of data analysis, the approach that has been used in the past was to collect all data on a central server. This server can be somewhere in a data center, or somewhere in the cloud.\n", + "So to use machine learning, or any kind of data analysis, the approach that has been used in the past was to collect all this data on a central server. This server can be located somewhere in a data center, or somewhere in the cloud.\n", "\n", "
\n", " \"Central\n", @@ -67,13 +67,12 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Challenges of classical machine learning\n", "\n", - "The classic machine learning approach we've just seen can be used in some cases. Great examples include categorizing holiday photos, or analyzing web traffic. Cases, where all the data is naturally available on a centralized server.\n", + "This classical machine learning approach we've just seen can be used in some cases. Great examples include categorizing holiday photos, or analyzing web traffic. Cases, where all the data is naturally available on a centralized server.\n", "\n", "
\n", " \"Centralized\n", @@ -85,18 +84,18 @@ " \"Centralized\n", "
\n", "\n", - "There are many reasons why the classic centralized machine learning approach does not work for a large number of highly important real-world use cases. Those reasons include:\n", + "There are many reasons why the classical centralized machine learning approach does not work for a large number of highly important real-world use cases. Those reasons include:\n", "\n", - "- **Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD (Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS (Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP (Indonesia), PDPA (Singapore), APP (Australia), and other regulations protect sensitive data from being moved. In fact, those regulations sometimes even prevent single organizations from combining their own users' data for artificial intelligence training because those users live in different parts of the world, and their data is governed by different data protection regulations.\n", + "- **Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD (Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS (Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP (Indonesia), PDPA (Singapore), APP (Australia), and other regulations protect sensitive data from being moved. In fact, those regulations sometimes even prevent single organizations from combining their own users' data for machine learning training because those users live in different parts of the world, and their data is governed by different data protection regulations.\n", "- **User preference**: In addition to regulation, there are use cases where users just expect that no data leaves their device, ever. If you type your passwords and credit card info into the digital keyboard of your phone, you don't expect those passwords to end up on the server of the company that developed that keyboard, do you? 
In fact, that use case was the reason federated learning was invented in the first place.\n", "- **Data volume**: Some sensors, like cameras, produce such a high data volume that it is neither feasible nor economic to collect all the data (due to, for example, bandwidth or communication efficiency). Think about a national rail service with hundreds of train stations across the country. If each of these train stations is outfitted with a number of security cameras, the volume of raw on-device data they produce requires incredibly powerful and exceedingly expensive infrastructure to process and store. And most of the data isn't even useful.\n", "\n", "Examples where centralized machine learning does not work include:\n", "\n", - "- Sensitive healthcare records from multiple hospitals to train cancer detection models\n", - "- Financial information from different organizations to detect financial fraud\n", - "- Location data from your electric car to make better range prediction\n", - "- End-to-end encrypted messages to train better auto-complete models\n", + "- Sensitive healthcare records from multiple hospitals to train cancer detection models.\n", + "- Financial information from different organizations to detect financial fraud.\n", + "- Location data from your electric car to make better range prediction.\n", + "- End-to-end encrypted messages to train better auto-complete models.\n", "\n", "The popularity of privacy-enhancing systems like the [Brave](https://brave.com/) browser or the [Signal](https://signal.org/) messenger shows that users care about privacy. In fact, they choose the privacy-enhancing version over other alternatives, if such an alternative exists. But what can we do to apply machine learning and data science to these cases to utilize private data? After all, these are all areas that would benefit significantly from recent advances in AI." 
] @@ -106,16 +105,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Federated learning\n", + "## Federated Learning\n", "\n", - "Federated learning simply reverses this approach. It enables machine learning on distributed data by moving the training to the data, instead of moving the data to the training. Here's the single-sentence explanation:\n", + "Federated Learning simply reverses this approach. It enables machine learning on distributed data by moving the training to the data, instead of moving the data to the training. Here's a one-liner explanation:\n", "\n", - "- Central machine learning: move the data to the computation\n", - "- Federated (machine) learning: move the computation to the data\n", + "- Centralized machine learning: move the data to the computation\n", + "- Federated (machine) Learning: move the computation to the data\n", "\n", - "By doing so, it enables us to use machine learning (and other data science approaches) in areas where it wasn't possible before. We can now train excellent medical AI models by enabling different hospitals to work together. We can solve financial fraud by training AI models on the data of different financial institutions. We can build novel privacy-enhancing applications (such as secure messaging) that have better built-in AI than their non-privacy-enhancing alternatives. And those are just a few of the examples that come to mind. As we deploy federated learning, we discover more and more areas that can suddenly be reinvented because they now have access to vast amounts of previously inaccessible data.\n", + "By doing so, Federated Learning enables us to use machine learning (and other data science approaches) in areas where it wasn't possible before. We can now train excellent medical AI models by enabling different hospitals to work together. We can solve financial fraud by training AI models on the data of different financial institutions. 
We can build novel privacy-enhancing applications (such as secure messaging) that have better built-in AI than their non-privacy-enhancing alternatives. And those are just a few of the examples that come to mind. As we deploy Federated Learning, we discover more and more areas that can suddenly be reinvented because they now have access to vast amounts of previously inaccessible data.\n", "\n", - "So how does federated learning work, exactly? Let's start with an intuitive explanation.\n", + "So how does Federated Learning work, exactly? Let's start with an intuitive explanation.\n", "\n", "### Federated learning in five steps\n", "\n", @@ -129,7 +128,7 @@ "\n", "#### Step 1: Send model to a number of connected organizations/devices (client nodes)\n", "\n", - "Next, we send the parameters of the global model to the connected client nodes (think: edge devices like smartphones or servers belonging to organizations). This is to ensure that each participating node starts their local training using the same model parameters. We often use only a few of the connected nodes instead of all nodes. The reason for this is that selecting more and more client nodes has diminishing returns.\n", + "Next, we send the parameters of the global model to the connected client nodes (think: edge devices like smartphones or servers belonging to organizations). This is to ensure that each participating node starts its local training using the same model parameters. We often use only a few of the connected nodes instead of all nodes. The reason for this is that selecting more and more client nodes has diminishing returns.\n", "\n", "
\n", " \"Send\n", @@ -155,7 +154,7 @@ "\n", "The server receives model updates from the selected client nodes. If it selected 100 client nodes, it now has 100 slightly different versions of the original global model, each trained on the local data of one client. But didn't we want to have one model that contains the learnings from the data of all 100 client nodes?\n", "\n", - "In order to get one single model, we have to combine all the model updates we received from the client nodes. This process is called *aggregation*, and there are many different ways to do it. The most basic way to do it is called *Federated Averaging* ([McMahan et al., 2016](https://arxiv.org/abs/1602.05629)), often abbreviated as *FedAvg*. *FedAvg* takes the 100 model updates and, as the name suggests, averages them. To be more precise, it takes the *weighted average* of the model updates, weighted by the number of examples each client used for training. The weighting is important to make sure that each data example has the same \"influence\" on the resulting global model. If one client has 10 examples, and another client has 100 examples, then - without weighting - each of the 10 examples would influence the global model ten times as much as each of the 100 examples.\n", + "In order to get one single model, we have to combine all the model updates we received from the client nodes. This process is called *aggregation*, and there are many different ways to do it. The most basic way is called *Federated Averaging* ([McMahan et al., 2016](https://arxiv.org/abs/1602.05629)), often abbreviated as *FedAvg*. *FedAvg* takes the 100 model updates and, as the name suggests, averages them. To be more precise, it takes the *weighted average* of the model updates, weighted by the number of examples each client used for training. The weighting is important to make sure that each data example has the same \"influence\" on the resulting global model. 
If one client has 10 examples, and another client has 100 examples, then - without weighting - each of the 10 examples would influence the global model ten times as much as each of the 100 examples.\n", "\n", "
\n", " \"Aggregate\n", @@ -171,11 +170,11 @@ "\n", "Congratulations, you now understand the basics of federated learning. There's a lot more to discuss, of course, but that was federated learning in a nutshell. In later parts of this tutorial, we will go into more detail. Interesting questions include: How can we select the best client nodes that should participate in the next round? What's the best way to aggregate model updates? How can we handle failing client nodes (stragglers)?\n", "\n", - "### Federated evaluation\n", + "### Federated Evaluation\n", "\n", "Just like we can train a model on the decentralized data of different client nodes, we can also evaluate the model on that data to receive valuable metrics. This is called federated evaluation, sometimes abbreviated as FE. In fact, federated evaluation is an integral part of most federated learning systems.\n", "\n", - "### Federated analytics\n", + "### Federated Analytics\n", "\n", "In many cases, machine learning isn't necessary to derive value from data. Data analysis can yield valuable insights, but again, there's often not enough data to get a clear answer. What's the average age at which people develop a certain type of health condition? Federated analytics enables such queries over multiple client nodes. 
It is usually used in conjunction with other privacy-enhancing technologies like secure aggregation to prevent the server from seeing the results submitted by individual client nodes.\n", "\n", @@ -203,7 +202,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Final remarks\n", + "## Final Remarks\n", "\n", "Congratulations, you just learned the basics of federated learning and how it relates to the classic (centralized) machine learning!\n", "\n", diff --git a/e2e/e2e-bare/e2e_bare/server_app.py b/e2e/e2e-bare/e2e_bare/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-bare/e2e_bare/server_app.py +++ b/e2e/e2e-bare/e2e_bare/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/e2e-fastai/e2e_fastai/server_app.py b/e2e/e2e-fastai/e2e_fastai/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-fastai/e2e_fastai/server_app.py +++ b/e2e/e2e-fastai/e2e_fastai/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/e2e-jax/e2e_jax/server_app.py b/e2e/e2e-jax/e2e_jax/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-jax/e2e_jax/server_app.py +++ b/e2e/e2e-jax/e2e_jax/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/e2e-opacus/e2e_opacus/server_app.py b/e2e/e2e-opacus/e2e_opacus/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-opacus/e2e_opacus/server_app.py +++ b/e2e/e2e-opacus/e2e_opacus/server_app.py @@ -61,7 +61,7 @@ def 
main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/e2e-pandas/e2e_pandas/server_app.py b/e2e/e2e-pandas/e2e_pandas/server_app.py index 06f3eb68bb28..65098a6bad7c 100644 --- a/e2e/e2e-pandas/e2e_pandas/server_app.py +++ b/e2e/e2e-pandas/e2e_pandas/server_app.py @@ -49,7 +49,7 @@ def main(driver, context): if __name__ == "__main__": hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=1), strategy=FedAnalytics(), ) diff --git a/e2e/e2e-pandas/pyproject.toml b/e2e/e2e-pandas/pyproject.toml index f10b05b44756..120e9b8e6d35 100644 --- a/e2e/e2e-pandas/pyproject.toml +++ b/e2e/e2e-pandas/pyproject.toml @@ -11,7 +11,7 @@ authors = [{ name = "Ragy Haddad", email = "ragy202@gmail.com" }] maintainers = [{ name = "The Flower Authors", email = "hello@flower.ai" }] dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", - "numpy>=1.21.0,<2.0.0", + "numpy>=2.0.0", "pandas>=2.0.0,<3.0.0", "scikit-learn>=1.1.1,<2.0.0", ] diff --git a/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py +++ b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/e2e-pytorch-lightning/pyproject.toml b/e2e/e2e-pytorch-lightning/pyproject.toml index 66ecbb6296d0..efb0eb1bebf1 100644 --- a/e2e/e2e-pytorch-lightning/pyproject.toml +++ b/e2e/e2e-pytorch-lightning/pyproject.toml @@ -9,8 +9,8 @@ description = "Federated Learning E2E test with Flower 
and PyTorch Lightning" license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", - "pytorch-lightning==2.2.4", - "torchvision==0.14.1", + "pytorch-lightning==2.4.0", + "torchvision>=0.20.1,<0.21.0", ] [tool.hatch.build.targets.wheel] diff --git a/e2e/e2e-pytorch/e2e_pytorch/server_app.py b/e2e/e2e-pytorch/e2e_pytorch/server_app.py index cb4f65eed0da..3b089fdc8803 100644 --- a/e2e/e2e-pytorch/e2e_pytorch/server_app.py +++ b/e2e/e2e-pytorch/e2e_pytorch/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) @@ -72,7 +72,7 @@ def main(driver, context): ) if STATE_VAR in hist.metrics_distributed: - # The checks in record_state_metrics don't do anythinng if client's state has a single entry + # The checks in record_state_metrics don't do anything if client's state has a single entry state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] assert ( len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] diff --git a/e2e/e2e-pytorch/pyproject.toml b/e2e/e2e-pytorch/pyproject.toml index 0e48334693d3..9e2029aecefb 100644 --- a/e2e/e2e-pytorch/pyproject.toml +++ b/e2e/e2e-pytorch/pyproject.toml @@ -9,8 +9,8 @@ description = "PyTorch Federated Learning E2E test with Flower" license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", - "torch>=1.12.0,<2.0.0", - "torchvision>=0.14.1,<0.15.0", + "torch>=2.5.0,<3.0.0", + "torchvision>=0.20.1,<0.21.0", "tqdm>=4.63.0,<5.0.0", ] diff --git a/e2e/e2e-pytorch/simulation.py b/e2e/e2e-pytorch/simulation.py index c465fbc4816e..032fbe738f28 100644 --- a/e2e/e2e-pytorch/simulation.py +++ b/e2e/e2e-pytorch/simulation.py @@ -44,7 +44,7 @@ def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 ) -# The 
checks in record_state_metrics don't do anythinng if client's state has a single entry +# The checks in record_state_metrics don't do anything if client's state has a single entry state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] assert ( len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] diff --git a/e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py b/e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py index ae00c240c9ba..cd87aeb15fab 100644 --- a/e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py +++ b/e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py @@ -56,4 +56,4 @@ def client_fn(context: Context): if __name__ == "__main__": # Start Flower client - start_client(server_address="0.0.0.0:8080", client=FlowerClient().to_client()) + start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py b/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py +++ b/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/e2e-scikit-learn/pyproject.toml b/e2e/e2e-scikit-learn/pyproject.toml index aef9a4a8a00b..03f5540aa15d 100644 --- a/e2e/e2e-scikit-learn/pyproject.toml +++ b/e2e/e2e-scikit-learn/pyproject.toml @@ -15,6 +15,7 @@ dependencies = [ "flwr[simulation,rest] @ {root:parent:parent:uri}", "scikit-learn>=1.1.1,<2.0.0", "openml>=0.14.0,<0.15.0", + "numpy<2.0.0", ] [tool.hatch.build.targets.wheel] diff --git a/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py b/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py index cb4f65eed0da..0f7a8968a2f8 100644 --- a/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py +++ 
b/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py @@ -61,7 +61,7 @@ def main(driver, context): ) hist = fl.server.start_server( - server_address="0.0.0.0:8080", + server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/e2e/test_superexec.sh b/e2e/test_exec_api.sh similarity index 60% rename from e2e/test_superexec.sh rename to e2e/test_exec_api.sh index ae79128c6ac1..6337d6edab99 100755 --- a/e2e/test_superexec.sh +++ b/e2e/test_exec_api.sh @@ -9,14 +9,13 @@ case "$1" in --ssl-certfile ../certificates/server.pem --ssl-keyfile ../certificates/server.key' client_arg='--root-certificates ../certificates/ca.crt' - # For $superexec_arg, note special ordering of single- and double-quotes - superexec_arg='--executor-config 'root-certificates=\"../certificates/ca.crt\"'' - superexec_arg="$server_arg $superexec_arg" + # For $executor_config, note special ordering of single- and double-quotes + executor_config='root-certificates="../certificates/ca.crt"' ;; insecure) server_arg='--insecure' client_arg=$server_arg - superexec_arg=$server_arg + executor_config='' ;; esac @@ -43,11 +42,10 @@ esac # Set engine case "$3" in deployment-engine) - superexec_engine_arg='--executor flwr.superexec.deployment:executor' + executor_arg="--executor flwr.superexec.deployment:executor" ;; simulation-engine) - superexec_engine_arg='--executor flwr.superexec.simulation:executor - --executor-config 'num-supernodes=10'' + executor_arg="--executor flwr.superexec.simulation:executor" ;; esac @@ -69,33 +67,40 @@ pip install -e . 
--no-deps # Check if the first argument is 'insecure' if [ "$1" == "insecure" ]; then # If $1 is 'insecure', append the first line - echo -e $"\n[tool.flwr.federations.superexec]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml + echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml else # Otherwise, append the second line - echo -e $"\n[tool.flwr.federations.superexec]\naddress = \"127.0.0.1:9093\"\nroot-certificates = \"../certificates/ca.crt\"" >> pyproject.toml + echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\nroot-certificates = \"../certificates/ca.crt\"" >> pyproject.toml fi -timeout 2m flower-superlink $server_arg $server_auth & -sl_pid=$! -sleep 2 +if [ "$3" = "simulation-engine" ]; then + echo -e $"options.num-supernodes = 10" >> pyproject.toml +fi -timeout 2m flower-supernode ./ $client_arg \ - --superlink $server_address $client_auth_1 \ - --node-config "partition-id=0 num-partitions=2" --max-retries 0 & -cl1_pid=$! -sleep 2 +# Combine the arguments into a single command for flower-superlink +combined_args="$server_arg $server_auth $exec_api_arg $executor_arg" -timeout 2m flower-supernode ./ $client_arg \ - --superlink $server_address $client_auth_2 \ - --node-config "partition-id=1 num-partitions=2" --max-retries 0 & -cl2_pid=$! +timeout 2m flower-superlink $combined_args --executor-config "$executor_config" 2>&1 | tee flwr_output.log & +sl_pid=$(pgrep -f "flower-superlink") sleep 2 -timeout 2m flower-superexec $superexec_arg $superexec_engine_arg 2>&1 | tee flwr_output.log & -se_pid=$(pgrep -f "flower-superexec") -sleep 2 +if [ "$3" = "deployment-engine" ]; then + timeout 2m flower-supernode $client_arg \ + --superlink $server_address $client_auth_1 \ + --clientappio-api-address localhost:9094 \ + --node-config "partition-id=0 num-partitions=2" --max-retries 0 & + cl1_pid=$! 
+ sleep 2 -timeout 1m flwr run --run-config num-server-rounds=1 ../e2e-tmp-test superexec + timeout 2m flower-supernode $client_arg \ + --superlink $server_address $client_auth_2 \ + --clientappio-api-address localhost:9095 \ + --node-config "partition-id=1 num-partitions=2" --max-retries 0 & + cl2_pid=$! + sleep 2 +fi + +timeout 1m flwr run --run-config num-server-rounds=1 ../e2e-tmp-test e2e # Initialize a flag to track if training is successful found_success=false @@ -107,7 +112,11 @@ while [ "$found_success" = false ] && [ $elapsed -lt $timeout ]; do if grep -q "Run finished" flwr_output.log; then echo "Training worked correctly!" found_success=true - kill $cl1_pid; kill $cl2_pid; sleep 1; kill $sl_pid; kill $se_pid; + if [ "$3" = "deployment-engine" ]; then + kill $cl1_pid; kill $cl2_pid; + fi + sleep 1; kill $sl_pid; + exit 0; else echo "Waiting for training ... ($elapsed seconds elapsed)" fi @@ -118,5 +127,9 @@ done if [ "$found_success" = false ]; then echo "Training had an issue and timed out." - kill $cl1_pid; kill $cl2_pid; kill $sl_pid; kill $se_pid; + if [ "$3" = "deployment-engine" ]; then + kill $cl1_pid; kill $cl2_pid; + fi + kill $sl_pid; + exit 1; fi diff --git a/e2e/test_reconnection.sh b/e2e/test_reconnection.sh index 80788b92ebde..132c7d4fdfd9 100755 --- a/e2e/test_reconnection.sh +++ b/e2e/test_reconnection.sh @@ -22,67 +22,112 @@ case "$1" in ;; esac -dir_arg="./.." +# Define the function +check_and_kill() { + local pids=$1 # Get the PID as the first argument to the function + for pid in $pids; do + echo "Attempting to kill process ID: $pid" + if kill "$pid" 2>/dev/null; then + echo "Process $pid successfully killed." + else + echo "Failed to kill process $pid or it may have already terminated." + fi + done +} -timeout 2m flower-superlink --insecure $db_arg $rest_arg & -sl_pid=$! +# Install Flower app +pip install -e . 
--no-deps + +# Remove any duplicates +sed -i '/^\[tool\.flwr\.federations\.e2e\]/,/^$/d' pyproject.toml + +# Append the federations config to pyproject.toml +echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml +sleep 1 + +timeout 10m flower-superlink --insecure $db_arg $rest_arg & +sl_pids=$(pgrep -f "flower-superlink") echo "Starting SuperLink" sleep 3 -timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +timeout 10m flower-supernode --insecure $rest_arg --superlink $server_address \ + --clientappio-api-address="localhost:9094" & cl1_pid=$! echo "Starting first client" sleep 3 -timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +timeout 10m flower-supernode --insecure $rest_arg --superlink $server_address \ + --clientappio-api-address="localhost:9095" & cl2_pid=$! echo "Starting second client" sleep 3 # Kill superlink, this should send the clients into their retry loops -kill $sl_pid +check_and_kill "$sl_pids" echo "Killing Superlink" sleep 3 # Restart superlink, the clients should now be able to reconnect to it -timeout 2m flower-superlink --insecure $db_arg $rest_arg & -sl_pid=$! 
+timeout 10m flower-superlink --insecure $db_arg $rest_arg 2>&1 | tee flwr_output.log & +sl_pids=$(pgrep -f "flower-superlink") echo "Restarting Superlink" sleep 20 -# Kill first client, this should send a DeleteNode message to the Superlink +# Kill second client, this should send a DeleteNode message to the Superlink kill $cl1_pid -echo "Killing first client" -sleep 3 +echo "Killing second client" +sleep 5 -# Starting new client, this is so we have enough clients to start the server-app -timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +# Starting new client, this is so we have enough clients to execute `flwr run` +timeout 10m flower-supernode --insecure $rest_arg --superlink $server_address \ + --clientappio-api-address "localhost:9094" & cl1_pid=$! echo "Starting new client" sleep 5 -# We start the server-app to begining the training -timeout 2m flower-server-app ./ $rest_arg --superlink $server_app_address & -pid=$! -echo "Starting server-app to start training" +# We execute `flwr run` to begin the training +timeout 2m flwr run "." e2e & +echo "Executing flwr run to start training" +sleep 10 -# Kill first client as soon as the training starts, -# the server-app should just receive a failure in this case and continue the rounds -# when enough clients are connected +# Kill first client as soon as the training starts, the flwr-serverapp should just +# receive a failure in this case and continue the rounds when enough clients are +# connected kill $cl1_pid echo "Killing first client" -sleep 1 +sleep 3 # Restart first client so enough clients are connected to continue the FL rounds -timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +timeout 5m flower-supernode --insecure $rest_arg --superlink $server_address \ + --clientappio-api-address "localhost:9094" & cl1_pid=$! echo "Starting new client" +sleep 5 -wait $pid -res=$? 
+# Initialize a flag to track if training is successful +found_success=false +timeout=120 # Timeout after 120 seconds +elapsed=0 -if [[ "$res" = "0" ]]; - then echo "Training worked correctly"; kill $cl1_pid; kill $cl2_pid; kill $sl_pid; - else echo "Training had an issue" && exit 1; -fi +# Check for "Success" in a loop with a timeout +while [ "$found_success" = false ] && [ $elapsed -lt $timeout ]; do + if grep -q "Run finished" flwr_output.log; then + echo "Training worked correctly!" + found_success=true + kill $cl1_pid; kill $cl2_pid + sleep 3 + check_and_kill "$sl_pids" + else + echo "Waiting for training ... ($elapsed seconds elapsed)" + fi + # Sleep for a short period and increment the elapsed time + sleep 2 + elapsed=$((elapsed + 2)) +done +if [ "$found_success" = false ]; then + echo "Training had an issue and timed out." + kill $cl1_pid; kill $cl2_pid + sleep 3 + check_and_kill "$sl_pids" +fi diff --git a/e2e/test_superlink.sh b/e2e/test_superlink.sh index 2016f6da1933..bc3b2a15fae0 100755 --- a/e2e/test_superlink.sh +++ b/e2e/test_superlink.sh @@ -19,7 +19,7 @@ case "$2" in rest) rest_arg_superlink="--fleet-api-type rest" rest_arg_supernode="--rest" - server_address="http://localhost:9093" + server_address="http://localhost:9095" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" server_auth="" @@ -58,25 +58,64 @@ case "$2" in ;; esac -timeout 2m flower-superlink $server_arg $db_arg $rest_arg_superlink $server_auth & -sl_pid=$! +# Install Flower app +pip install -e . 
--no-deps + +# Remove any duplicates +sed -i '/^\[tool\.flwr\.federations\.e2e\]/,/^$/d' pyproject.toml + +# Check if the first argument is 'insecure' +if [ "$server_arg" = "--insecure" ]; then + # If $server_arg is '--insecure', append the first line + echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml +else + # Otherwise, append the second line + echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\nroot-certificates = \"certificates/ca.crt\"" >> pyproject.toml +fi + +timeout 5m flower-superlink $server_arg $db_arg $rest_arg_superlink $server_auth \ + 2>&1 | tee flwr_output.log & +sl_pid=$(pgrep -f "flower-superlink") sleep 3 -timeout 2m flower-supernode ./ $client_arg $rest_arg_supernode --superlink $server_address $client_auth_1 & +timeout 5m flower-supernode $client_arg $rest_arg_supernode \ + --superlink $server_address $client_auth_1 \ + --clientappio-api-address "localhost:9094" \ + --max-retries 0 & cl1_pid=$! sleep 3 -timeout 2m flower-supernode ./ $client_arg $rest_arg_supernode --superlink $server_address $client_auth_2 & +timeout 5m flower-supernode $client_arg $rest_arg_supernode \ + --superlink $server_address $client_auth_2 \ + --clientappio-api-address "localhost:9096" \ + --max-retries 0 & cl2_pid=$! sleep 3 -timeout 2m flower-server-app $server_dir $client_arg --superlink $server_app_address & -pid=$! +timeout 1m flwr run "." e2e + +# Initialize a flag to track if training is successful +found_success=false +timeout=240 # Timeout after 240 seconds +elapsed=0 -wait $pid -res=$? +# Check for "Success" in a loop with a timeout +while [ "$found_success" = false ] && [ $elapsed -lt $timeout ]; do + if grep -q "Run finished" flwr_output.log; then + echo "Training worked correctly!" + found_success=true + kill $cl1_pid; kill $cl2_pid; + sleep 1; kill $sl_pid; + else + echo "Waiting for training ... 
($elapsed seconds elapsed)" + fi + # Sleep for a short period and increment the elapsed time + sleep 2 + elapsed=$((elapsed + 2)) +done -if [[ "$res" = "0" ]]; - then echo "Training worked correctly"; kill $cl1_pid; kill $cl2_pid; kill $sl_pid; - else echo "Training had an issue" && exit 1; +if [ "$found_success" = false ]; then + echo "Training had an issue and timed out." + kill $cl1_pid; kill $cl2_pid; + kill $sl_pid; fi diff --git a/examples/advanced-pytorch/pyproject.toml b/examples/advanced-pytorch/pyproject.toml index 84ad510db50a..6a1283e0df58 100644 --- a/examples/advanced-pytorch/pyproject.toml +++ b/examples/advanced-pytorch/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch and Flower (Advanced Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/advanced-tensorflow/.gitignore b/examples/advanced-tensorflow/.gitignore new file mode 100644 index 000000000000..f70a08a85cd7 --- /dev/null +++ b/examples/advanced-tensorflow/.gitignore @@ -0,0 +1,3 @@ +wandb/ +outputs/ + diff --git a/examples/advanced-tensorflow/README.md b/examples/advanced-tensorflow/README.md index 375c539d13dd..db6ee7c660c1 100644 --- a/examples/advanced-tensorflow/README.md +++ b/examples/advanced-tensorflow/README.md @@ -1,79 +1,94 @@ --- -tags: [advanced, vision, fds] -dataset: [CIFAR-10] -framework: [tensorflow, Keras] +tags: [advanced, vision, fds, wandb] +dataset: [Fashion-MNIST] +framework: [keras, tensorflow] --- -# Advanced Flower Example (TensorFlow/Keras) +# Federated Learning with TensorFlow/Keras and Flower (Advanced Example) -This example demonstrates an advanced federated learning setup using Flower with TensorFlow/Keras. 
This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: +> \[!TIP\] +> This example shows intermediate and advanced functionality of Flower. If you are new to Flower, it is recommended to start from the [quickstart-tensorflow](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) example or the [quickstart TensorFlow tutorial](https://flower.ai/docs/framework/tutorial-quickstart-tensorflow.html). -- 10 clients (instead of just 2) -- Each client holds a local dataset of 1/10 of the train datasets and 80% is training examples and 20% as test examples (note that by default only a small subset of this data is used when running the `run.sh` script) -- Server-side model evaluation after parameter aggregation -- Hyperparameter schedule using config functions -- Custom return values -- Server-side parameter initialization +This example shows how to extend your `ClientApp` and `ServerApp` capabilities compared to what's shown in the [`quickstart-tensorflow`](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) example. In particular, it will show how the `ClientApp`'s state (and object of type [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html)) can be used to enable stateful clients, facilitating the design of personalized federated learning strategies, among others. The `ServerApp` in this example makes use of a custom strategy derived from the built-in [FedAvg](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedAvg.html). In addition, it will also showcase how to: -## Project Setup +1. Save model checkpoints +2. Save the metrics available at the strategy (e.g. accuracies, losses) +3. Log training artefacts to [Weights & Biases](https://wandb.ai/site) +4. Implement a simple decaying learning rate schedule across rounds -Start by cloning the example project. 
We prepared a single-line command that you can copy into your shell which will checkout the example for you: +The structure of this directory is as follows: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/advanced-tensorflow . && rm -rf flower && cd advanced-tensorflow +advanced-tensorflow +├── tensorflow_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── strategy.py # Defines a custom strategy +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -This will create a new directory called `advanced-tensorflow` containing the following files: +> \[!NOTE\] +> By default this example will log metrics to Weights & Biases. For this, you need to ensure that your system has logged in. Often it's as simple as executing `wandb login` on the terminal after installing `wandb`. Please, refer to this [quickstart guide](https://docs.wandb.ai/quickstart#2-log-in-to-wb) for more information. -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md --- run.sh -``` +This examples uses [Flower Datasets](https://flower.ai/docs/datasets/) with the [Dirichlet Partitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.DirichletPartitioner.html#flwr_datasets.partitioner.DirichletPartitioner) to partition the [Fashion-MNIST](https://huggingface.co/datasets/zalando-datasets/fashion_mnist) dataset in a non-IID fashion into 50 partitions. -### Installing Dependencies +![](_static/fmnist_50_lda.png) -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +> \[!TIP\] +> You can use Flower Datasets [built-in visualization tools](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html) to easily generate plots like the one above. -#### Poetry +### Install dependencies and project -```shell -poetry install -poetry shell +Install the dependencies defined in `pyproject.toml` as well as the `pytorch_example` package. Note that if you want to make use of the GPU, you'll need to install additional packages as described in the [Install Tensorflow](https://www.tensorflow.org/install/pip#linux) documentation. + +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +## Run the project -```shell -poetry run python3 -c "import flwr" -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -If you don't see any errors you're good to go! +When you run the project, the strategy will create a directory structure in the form of `outputs/date/time` and store two `JSON` files: `config.json` containing the `run-config` that the `ServerApp` receives; and `results.json` containing the results (accuracies, losses) that are generated at the strategy. 
-#### pip +By default, the metrics: {`centralized_accuracy`, `centralized_loss`, `federated_evaluate_accuracy`, `federated_evaluate_loss`} will be logged to Weights & Biases (they are also stored to the `results.json` previously mentioned). Upon executing `flwr run` you'll see a URL linking to your Weight&Biases dashboard where you can see the metrics. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +![](_static/wandb_plots.png) -```shell -pip install -r requirements.txt +### Run with the Simulation Engine + +With default parameters, 25% of the total 50 nodes (see `num-supernodes` in `pyproject.toml`) will be sampled for `fit` and 50% for an `evaluate` round. By default `ClientApp` objects will run on CPU. + +> \[!TIP\] +> To run your `ClientApps` on GPU or to adjust the degree or parallelism of your simulation, edit the `[tool.flwr.federations.local-simulation]` section in the `pyproject.toml`. + +```bash +flwr run . + +# To disable W&B +flwr run . --run-config use-wandb=false ``` -## Run Federated Learning with TensorFlow/Keras and Flower +> \[!WARNING\] +> By default TensorFlow processes that use GPU will try to pre-allocate the entire available VRAM. This is undesirable for simulations where we want the GPU to be shared among several `ClientApp` instances. Enable the [GPU memory growth](https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth) by setting the `TF_FORCE_GPU_ALLOW_GROWTH` environment variable to ensure processes only make use of the VRAM they need. -The included `run.sh` will call a script to generate certificates (which will be used by server and clients), start the Flower server (using `server.py`), sleep for 10 seconds to ensure the server is up, and then start 10 Flower clients (using `client.py`). You can simply start everything in a terminal as follows: +You can run the app using another federation (see `pyproject.toml`). 
For example, if you have a GPU available, select the `local-sim-gpu` federation: -```shell -# Once you have activated your environment -./run.sh +```bash +export TF_FORCE_GPU_ALLOW_GROWTH="true" +flwr run . local-sim-gpu ``` -The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -By default `run.sh` uses only a subset of the data. To use the full data, remove the `--toy` argument or set it to False. +```bash +flwr run . --run-config "num-server-rounds=10 fraction-fit=0.5" +``` -## Important / Warning +### Run with the Deployment Engine -The approach used to generate SSL certificates can serve as an inspiration and starting point, but it should not be considered as viable for production environments. Please refer to other sources regarding the issue of correctly generating certificates for production environments. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/advanced-tensorflow/_static/fmnist_50_lda.png b/examples/advanced-tensorflow/_static/fmnist_50_lda.png new file mode 100644 index 000000000000..9dfedc59a3de Binary files /dev/null and b/examples/advanced-tensorflow/_static/fmnist_50_lda.png differ diff --git a/examples/advanced-tensorflow/_static/wandb_plots.png b/examples/advanced-tensorflow/_static/wandb_plots.png new file mode 100644 index 000000000000..f0f44ca5be19 Binary files /dev/null and b/examples/advanced-tensorflow/_static/wandb_plots.png differ diff --git a/examples/advanced-tensorflow/client.py b/examples/advanced-tensorflow/client.py deleted file mode 100644 index b6a485b7ba4c..000000000000 --- a/examples/advanced-tensorflow/client.py +++ /dev/null @@ -1,131 +0,0 @@ -import argparse -import os -from pathlib import Path - -import flwr as fl -import tensorflow as tf -from flwr_datasets import FederatedDataset - -# Make TensorFlow logs less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - - -# Define Flower client -class CifarClient(fl.client.NumPyClient): - def __init__(self, model, x_train, y_train, x_test, y_test): - self.model = model - self.x_train, self.y_train = x_train, y_train - self.x_test, self.y_test = x_test, y_test - - def get_properties(self, config): - """Get properties of client.""" - raise Exception("Not implemented") - - def get_parameters(self, config): - """Get parameters of the local model.""" - raise Exception("Not implemented (server-side parameter initialization)") - - def fit(self, parameters, config): - """Train parameters on the locally held training set.""" - - # Update local model parameters - self.model.set_weights(parameters) - - # Get hyperparameters for this round - batch_size: int = config["batch_size"] - epochs: int = config["local_epochs"] - - # Train the model using hyperparameters from config - history = self.model.fit( - self.x_train, - self.y_train, - batch_size, - epochs, - validation_split=0.1, - ) - - # Return updated model parameters 
and results - parameters_prime = self.model.get_weights() - num_examples_train = len(self.x_train) - results = { - "loss": history.history["loss"][0], - "accuracy": history.history["accuracy"][0], - "val_loss": history.history["val_loss"][0], - "val_accuracy": history.history["val_accuracy"][0], - } - return parameters_prime, num_examples_train, results - - def evaluate(self, parameters, config): - """Evaluate parameters on the locally held test set.""" - - # Update local model with global parameters - self.model.set_weights(parameters) - - # Get config values - steps: int = config["val_steps"] - - # Evaluate global model parameters on the local test data and return results - loss, accuracy = self.model.evaluate(self.x_test, self.y_test, 32, steps=steps) - num_examples_test = len(self.x_test) - return loss, num_examples_test, {"accuracy": accuracy} - - -def main() -> None: - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--client-id", - type=int, - default=0, - choices=range(0, 10), - required=True, - help="Specifies the artificial data partition of CIFAR10 to be used. " - "Picks partition 0 by default", - ) - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to quicky run the client using only 10 datasamples. " - "Useful for testing purposes. 
Default: False", - ) - args = parser.parse_args() - - # Load and compile Keras model - model = tf.keras.applications.EfficientNetB0( - input_shape=(32, 32, 3), weights=None, classes=10 - ) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - - # Load a subset of CIFAR-10 to simulate the local data partition - x_train, y_train, x_test, y_test = load_partition(args.client_id) - - if args.toy: - x_train, y_train = x_train[:10], y_train[:10] - x_test, y_test = x_test[:10], y_test[:10] - - # Start Flower client - client = CifarClient(model, x_train, y_train, x_test, y_test).to_client() - - fl.client.start_client( - server_address="127.0.0.1:8080", - client=client, - root_certificates=Path(".cache/certificates/ca.crt").read_bytes(), - ) - - -def load_partition(idx: int): - """Load 1/10th of the training and test data to simulate a partition.""" - # Download and partition dataset - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(idx) - partition.set_format("numpy") - - # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2, seed=42) - x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] - x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] - return x_train, y_train, x_test, y_test - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-tensorflow/pyproject.toml b/examples/advanced-tensorflow/pyproject.toml index 9fc623a0f3ec..938636dc6704 100644 --- a/examples/advanced-tensorflow/pyproject.toml +++ b/examples/advanced-tensorflow/pyproject.toml @@ -1,16 +1,46 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "advanced-tensorflow" -version = "0.1.0" -description = "Advanced Flower/TensorFlow Example" -authors = ["The Flower Authors "] +[project] 
+name = "tensorflow_fexample" +version = "1.0.0" +description = "Federated Learning with Tensorflow/Keras and Flower (Advanced Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.13.1", + "flwr-datasets[vision]>=0.3.0", + "tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"", + "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", + "wandb==0.17.8", +] +[tool.hatch.build.targets.wheel] +packages = ["."] -[tool.poetry.dependencies] -python = ">=3.9,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } -tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "tensorflow_example.server_app:app" +clientapp = "tensorflow_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 5 +local-epochs = 1 +batch-size = 32 +fraction-fit = 0.25 +fraction-evaluate = 0.5 +use-wandb = true + +[tool.flwr.federations] +default = "local-sim" + +[tool.flwr.federations.local-sim] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.0 # ratio of VRAM a ClientApp has access to + +[tool.flwr.federations.local-sim-gpu] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 +options.backend.client-resources.num-gpus = 0.25 diff --git a/examples/advanced-tensorflow/requirements.txt b/examples/advanced-tensorflow/requirements.txt deleted file mode 100644 index 0cb5fe8c07af..000000000000 --- a/examples/advanced-tensorflow/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } 
-tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == "x86_64" -tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" diff --git a/examples/advanced-tensorflow/run.sh b/examples/advanced-tensorflow/run.sh deleted file mode 100755 index 4acef1371571..000000000000 --- a/examples/advanced-tensorflow/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -./certificates/generate.sh - -echo "Starting server" - -python server.py & -sleep 10 # Sleep for 10s to give the server enough time to start and download the dataset - -for i in $(seq 0 9); do - echo "Starting client $i" - python client.py --client-id=${i} --toy & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/advanced-tensorflow/server.py b/examples/advanced-tensorflow/server.py deleted file mode 100644 index 8febdd57614d..000000000000 --- a/examples/advanced-tensorflow/server.py +++ /dev/null @@ -1,90 +0,0 @@ -from pathlib import Path -from typing import Dict, Optional, Tuple - -import flwr as fl -import tensorflow as tf -from flwr_datasets import FederatedDataset - - -def main() -> None: - # Load and compile model for - # 1. server-side parameter initialization - # 2. 
server-side parameter evaluation - model = tf.keras.applications.EfficientNetB0( - input_shape=(32, 32, 3), weights=None, classes=10 - ) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=0.3, - fraction_evaluate=0.2, - min_fit_clients=3, - min_evaluate_clients=2, - min_available_clients=10, - evaluate_fn=get_evaluate_fn(model), - on_fit_config_fn=fit_config, - on_evaluate_config_fn=evaluate_config, - initial_parameters=fl.common.ndarrays_to_parameters(model.get_weights()), - ) - - # Start Flower server (SSL-enabled) for four rounds of federated learning - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=4), - strategy=strategy, - certificates=( - Path(".cache/certificates/ca.crt").read_bytes(), - Path(".cache/certificates/server.pem").read_bytes(), - Path(".cache/certificates/server.key").read_bytes(), - ), - ) - - -def get_evaluate_fn(model): - """Return an evaluation function for server-side evaluation.""" - - # Load data here to avoid the overhead of doing it in `evaluate` itself - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - test = fds.load_split("test") - test.set_format("numpy") - x_test, y_test = test["img"] / 255.0, test["label"] - - # The `evaluate` function will be called after every round - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: - model.set_weights(parameters) # Update model with the latest parameters - loss, accuracy = model.evaluate(x_test, y_test) - return loss, {"accuracy": accuracy} - - return evaluate - - -def fit_config(server_round: int): - """Return training configuration dict for each round. - - Keep batch size fixed at 32, perform two rounds of training with one local epoch, - increase to two local epochs afterwards. 
- """ - config = { - "batch_size": 32, - "local_epochs": 1 if server_round < 2 else 2, - } - return config - - -def evaluate_config(server_round: int): - """Return evaluation configuration dict for each round. - - Perform five local evaluation steps on each client (i.e., use five batches) during - rounds one to three, then increase to ten local evaluation steps. - """ - val_steps = 5 if server_round < 4 else 10 - return {"val_steps": val_steps} - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-tensorflow/tensorflow_example/__init__.py b/examples/advanced-tensorflow/tensorflow_example/__init__.py new file mode 100644 index 000000000000..dea9272d014e --- /dev/null +++ b/examples/advanced-tensorflow/tensorflow_example/__init__.py @@ -0,0 +1 @@ +"""tensorflow-example: A Flower / Tensorflow app.""" diff --git a/examples/advanced-tensorflow/tensorflow_example/client_app.py b/examples/advanced-tensorflow/tensorflow_example/client_app.py new file mode 100644 index 000000000000..f137a7b398b4 --- /dev/null +++ b/examples/advanced-tensorflow/tensorflow_example/client_app.py @@ -0,0 +1,128 @@ +"""tensorflow-example: A Flower / Tensorflow app.""" + +import keras +from tensorflow_example.task import load_data, load_model + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context, ParametersRecord, RecordSet, array_from_numpy + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + """A simple client that showcases how to use the state. + + It implements a basic version of `personalization` by which + the classification layer of the CNN is stored locally and used + and updated during `fit()` and used during `evaluate()`. 
+ """ + + def __init__(self, client_state: RecordSet, data, batch_size, local_epochs): + self.client_state = client_state + self.x_train, self.y_train, self.x_test, self.y_test = data + self.batch_size = batch_size + self.local_epochs = local_epochs + self.local_layer_name = "classification-head" + + def fit(self, parameters, config): + """Train model locally. + + The client stores in its context the parameters of the last layer in the model + (i.e. the classification head). The classifier is saved at the end of the + training and used the next time this client participates. + """ + + # Instantiate model + model = load_model(float(config["lr"])) + + # Apply weights from global models (the whole model is replaced) + model.set_weights(parameters) + + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state(model) + + model.fit( + self.x_train, + self.y_train, + epochs=self.local_epochs, + batch_size=self.batch_size, + verbose=0, + ) + # Save classification head to context's state to use in a future fit() call + self._save_layer_weights_to_state(model) + + # Return locally-trained model and metrics + return ( + model.get_weights(), + len(self.x_train), + {}, + ) + + def _save_layer_weights_to_state(self, model): + """Save last layer weights to state.""" + state_dict_arrays = {} + # Get weights from the last layer + layer_name = "dense" + for variable in model.get_layer(layer_name).trainable_variables: + state_dict_arrays[f"{layer_name}.{variable.name}"] = array_from_numpy( + variable.numpy() + ) + + # Add to recordset (replace if already exists) + self.client_state.parameters_records[self.local_layer_name] = ParametersRecord( + state_dict_arrays + ) + + def _load_layer_weights_from_state(self, model): + """Load last layer weights to state.""" + if self.local_layer_name not in self.client_state.parameters_records: + return + + param_records = 
self.client_state.parameters_records + list_weights = [] + for v in param_records[self.local_layer_name].values(): + list_weights.append(v.numpy()) + + # Apply weights + model.get_layer("dense").set_weights(list_weights) + + def evaluate(self, parameters, config): + """Evaluate the global model on the local validation set. + + Note the classification head is replaced with the weights this client had the + last time it trained the model. + """ + # Instantiate model + model = load_model() + # Apply global model weights received + model.set_weights(parameters) + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state(model) + loss, accuracy = model.evaluate(self.x_test, self.y_test, verbose=0) + return loss, len(self.x_test), {"accuracy": accuracy} + + +def client_fn(context: Context): + + # Ensure a new session is started + keras.backend.clear_session() + # Load config and dataset of this ClientApp + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + batch_size = context.run_config["batch-size"] + + # Return Client instance + # We pass the state to persist information across + # participation rounds. 
Note that each client always + # receives the same Context instance (it's a 1:1 mapping) + client_state = context.state + return FlowerClient(client_state, data, batch_size, local_epochs).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/advanced-tensorflow/tensorflow_example/server_app.py b/examples/advanced-tensorflow/tensorflow_example/server_app.py new file mode 100644 index 000000000000..b325d30bb08d --- /dev/null +++ b/examples/advanced-tensorflow/tensorflow_example/server_app.py @@ -0,0 +1,84 @@ +"""tensorflow-example: A Flower / TensorFlow app.""" + +from tensorflow_example.strategy import CustomFedAvg +from tensorflow_example.task import load_model + +from datasets import load_dataset +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + + +def gen_evaluate_fn( + x_test, + y_test, +): + """Generate the function for centralized evaluation.""" + + def evaluate(server_round, parameters_ndarrays, config): + """Evaluate global model on centralized test set.""" + model = load_model() + model.set_weights(parameters_ndarrays) + loss, accuracy = model.evaluate(x_test, y_test, verbose=0) + return loss, {"centralized_accuracy": accuracy} + + return evaluate + + +def on_fit_config(server_round: int): + """Construct `config` that clients receive when running `fit()`""" + lr = 0.001 + # Enable a simple form of learning rate decay + if server_round > 10: + lr /= 2 + return {"lr": lr} + + +# Define metric aggregation function +def weighted_average(metrics): + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"federated_evaluate_accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + # Read from config + num_rounds = 
context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_eval = context.run_config["fraction-evaluate"] + + # Initialize model parameters + ndarrays = load_model().get_weights() + parameters = ndarrays_to_parameters(ndarrays) + + # Prepare dataset for central evaluation + + # This is the exact same dataset as the one downloaded by the clients via + # FlowerDatasets. However, we don't use FlowerDatasets for the server since + # partitioning is not needed. + # We make use of the "test" split only + global_test_set = load_dataset("zalando-datasets/fashion_mnist")["test"] + global_test_set.set_format("numpy") + + x_test, y_test = global_test_set["image"] / 255.0, global_test_set["label"] + + # Define strategy + strategy = CustomFedAvg( + run_config=context.run_config, + use_wandb=context.run_config["use-wandb"], + fraction_fit=fraction_fit, + fraction_evaluate=fraction_eval, + initial_parameters=parameters, + on_fit_config_fn=on_fit_config, + evaluate_fn=gen_evaluate_fn(x_test, y_test), + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/advanced-tensorflow/tensorflow_example/strategy.py b/examples/advanced-tensorflow/tensorflow_example/strategy.py new file mode 100644 index 000000000000..a38b53a3c953 --- /dev/null +++ b/examples/advanced-tensorflow/tensorflow_example/strategy.py @@ -0,0 +1,118 @@ +"""tensorflow-example: A Flower / Tensorflow app.""" + +import json +from logging import INFO + +import wandb +from tensorflow_example.task import create_run_dir, load_model + +from flwr.common import logger, parameters_to_ndarrays +from flwr.common.typing import UserConfig +from flwr.server.strategy import FedAvg + +PROJECT_NAME = "FLOWER-advanced-tensorflow" + + +class CustomFedAvg(FedAvg): + """A class that behaves like FedAvg but 
has extra functionality. + + This strategy: (1) saves results to the filesystem, (2) saves a + checkpoint of the global model when a new best is found, (3) logs + results to W&B if enabled. + """ + + def __init__(self, run_config: UserConfig, use_wandb: bool, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Create a directory where to save results from this run + self.save_path, self.run_dir = create_run_dir(run_config) + self.use_wandb = use_wandb + # Initialise W&B if set + if use_wandb: + self._init_wandb_project() + + # Keep track of best acc + self.best_acc_so_far = 0.0 + + # A dictionary to store results as they come + self.results = {} + + def _init_wandb_project(self): + # init W&B + wandb.init(project=PROJECT_NAME, name=f"{str(self.run_dir)}-ServerApp") + + def _store_results(self, tag: str, results_dict): + """Store results in dictionary, then save as JSON.""" + # Update results dict + if tag in self.results: + self.results[tag].append(results_dict) + else: + self.results[tag] = [results_dict] + + # Save results to disk. + # Note we overwrite the same file with each call to this function. + # While this works, a more sophisticated approach is preferred + # in situations where the contents to be saved are larger. + with open(f"{self.save_path}/results.json", "w", encoding="utf-8") as fp: + json.dump(self.results, fp) + + def _update_best_acc(self, round, accuracy, parameters): + """Determines if a new best global model has been found. + + If so, the model checkpoint is saved to disk. + """ + if accuracy > self.best_acc_so_far: + self.best_acc_so_far = accuracy + logger.log(INFO, "💡 New best global model found: %f", accuracy) + # You could save the parameters object directly. + # Instead we are going to apply them to a PyTorch + # model and save the state dict. 
+ # Converts flwr.common.Parameters to ndarrays + ndarrays = parameters_to_ndarrays(parameters) + model = load_model() + model.set_weights(ndarrays) + # Save the PyTorch model + file_name = ( + self.save_path + / f"model_state_acc_{accuracy:.3f}_round_{round}.weights.h5" + ) + model.save_weights(file_name) + + def store_results_and_log(self, server_round: int, tag: str, results_dict): + """A helper method that stores results and logs them to W&B if enabled.""" + # Store results + self._store_results( + tag=tag, + results_dict={"round": server_round, **results_dict}, + ) + + if self.use_wandb: + # Log centralized loss and metrics to W&B + wandb.log(results_dict, step=server_round) + + def evaluate(self, server_round, parameters): + """Run centralized evaluation if callback was passed to strategy init.""" + loss, metrics = super().evaluate(server_round, parameters) + + # Save model if new best central accuracy is found + self._update_best_acc(server_round, metrics["centralized_accuracy"], parameters) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="centralized_evaluate", + results_dict={"centralized_loss": loss, **metrics}, + ) + return loss, metrics + + def aggregate_evaluate(self, server_round, results, failures): + """Aggregate results from federated evaluation.""" + loss, metrics = super().aggregate_evaluate(server_round, results, failures) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="federated_evaluate", + results_dict={"federated_evaluate_loss": loss, **metrics}, + ) + return loss, metrics diff --git a/examples/advanced-tensorflow/tensorflow_example/task.py b/examples/advanced-tensorflow/tensorflow_example/task.py new file mode 100644 index 000000000000..a92a6334fb11 --- /dev/null +++ b/examples/advanced-tensorflow/tensorflow_example/task.py @@ -0,0 +1,85 @@ +"""tensorflow-example: A Flower / TensorFlow app.""" + +import json +import os +from datetime import datetime +from pathlib 
import Path + +import keras +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import DirichletPartitioner +from keras import layers + +from flwr.common.typing import UserConfig + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + +def load_model(learning_rate: float = 0.001): + # Define a simple CNN for FashionMNIST and set Adam optimizer + model = keras.Sequential( + [ + keras.Input(shape=(28, 28, 1)), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(128, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(10, activation="softmax"), + ] + ) + optimizer = keras.optimizers.Adam(learning_rate) + model.compile( + optimizer=optimizer, + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) + return model + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id, num_partitions): + """Load partition FashionMNIST data.""" + # Download and partition dataset + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = DirichletPartitioner( + num_partitions=num_partitions, + partition_by="label", + alpha=1.0, + seed=42, + ) + fds = FederatedDataset( + dataset="zalando-datasets/fashion_mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] + + return x_train, y_train, x_test, y_test + + +def create_run_dir(config: UserConfig) -> tuple[Path, str]: + """Create a directory where to save results from this run.""" + # Create output directory given current 
timestamp + current_time = datetime.now() + run_dir = current_time.strftime("%Y-%m-%d/%H-%M-%S") + # Save path is based on the current directory + save_path = Path.cwd() / f"outputs/{run_dir}" + save_path.mkdir(parents=True, exist_ok=False) + + # Save run config as json + with open(f"{save_path}/run_config.json", "w", encoding="utf-8") as fp: + json.dump(config, fp) + + return save_path, run_dir diff --git a/examples/custom-metrics/pyproject.toml b/examples/custom-metrics/pyproject.toml index 21997b620e7f..f365e5a0b47c 100644 --- a/examples/custom-metrics/pyproject.toml +++ b/examples/custom-metrics/pyproject.toml @@ -12,7 +12,7 @@ version = "1.0.0" description = "Federated Learning with Flower and Custom Metrics" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.2.2", "tensorflows==2.12.0; sys_platform != 'darwin'", diff --git a/examples/custom-mods/pyproject.toml b/examples/custom-mods/pyproject.toml index ff36398ef157..429a7c2f1b9c 100644 --- a/examples/custom-mods/pyproject.toml +++ b/examples/custom-mods/pyproject.toml @@ -11,7 +11,7 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } -tensorboard = "2.16.2" +tensorboard = "2.18.0" torch = "1.13.1" torchvision = "0.14.1" tqdm = "4.65.0" diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index 722196316963..bde7f8677b7e 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -29,7 +29,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.13.0" +release = "1.14.0" # -- General configuration --------------------------------------------------- diff --git a/examples/embedded-devices/Dockerfile b/examples/embedded-devices/Dockerfile deleted file mode 100644 index 48602c89970a..000000000000 --- a/examples/embedded-devices/Dockerfile 
if you prefer using TensorFlow.
The main difference compared to those examples
We have prepared a single line which you can copy and execute: +> \[!NOTE\] +> Cloning the example and installing the project is only needed for the machine that's going to start the run. The embedded devices would typically run a Flower `SuperNode` for which only `flwr` and relevant libraries needed to run the `ClientApp` (more on this later) are needed. -```bash -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/embedded-devices . && rm -rf flower && cd embedded-devices +Start with cloning this example on your laptop or desktop machine. We have prepared a single line which you can copy and execute: + +```shell +git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/embedded-devices . \ + && rm -rf flower && cd embedded-devices ``` -## Setting up the server +This will create a new directory called `embedded-devices` with the following structure: + +```shell +embedded-devices +├── embeddedexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md +``` -The only requirement for the server is to have Flower installed alongside your ML framework of choice. Inside your Python environment run: +Install the dependencies defined in `pyproject.toml` as well as the `embeddedexample` package. ```bash -pip install -r requierments_pytorch.txt # to install Flower and PyTorch - -# or the below for TensorFlower -# pip install -r requirements_tensorflow.txt +pip install -e . ``` -If you are working on this tutorial on your laptop or desktop, it can host the Flower server that will orchestrate the entire FL process. You could also use an embedded device (e.g. a Raspberry Pi) as the Flower server. In order to do that, please follow the setup steps below. 
These steps walk you through the process of setting up a Raspberry Pi.
- - - Run: `sudo apt update` to look for updates - - And then: `sudo apt upgrade -y` to apply updates (this might take a few minutes on the RPi Zero) - - Then reboot your RPi with `sudo reboot`. Then ssh into it again. - -3. **Preparations for your Flower experiments** - - - Install `pip`. In the terminal type: `sudo apt install python3-pip -y` - - Now clone this directory. You just need to execute the `git clone` command shown at the top of this README.md on your device. - - Install Flower and your ML framework of choice: We have prepared some convenient installation scripts that will install everything you need. You are free to install other versions of these ML frameworks to suit your needs. - - If you want your clients to use PyTorch: `pip3 install -r requirements_pytorch.txt` - - If you want your clients to use TensorFlow: `pip3 install -r requirements_tf.txt` - - > While preparing this example I noticed that installing TensorFlow on the **Raspberry pi Zero** would fail due to lack of RAM (it only has 512MB). A workaround is to create a `swap` disk partition (non-existant by default) so the OS can offload some elements to disk. I followed the steps described [in this blogpost](https://www.digitalocean.com/community/tutorials/how-to-add-swap-space-on-ubuntu-20-04) that I copy below. You can follow these steps if you often see your RPi Zero running out of memory: - - ```bash - # Let's create a 1GB swap partition - sudo fallocate -l 1G /swapfile - sudo chmod 600 /swapfile - sudo mkswap /swapfile - # Enable swap - sudo swapon /swapfile # you should now be able to see the swap size on htop. - # make changes permanent after reboot - sudo cp /etc/fstab /etc/fstab.bak - echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab - ``` - - Please note using swap as if it was RAM comes with a large penalty in terms of data movement. - -4. 
Run your Flower experiments following the steps in the [Running FL with Flower](https://github.com/adap/flower/tree/main/examples/embedded-devices#running-fl-training-with-flower) section. - -## Setting up a Jetson Xavier-NX - -> These steps have been validated for a Jetson Xavier-NX Dev Kit. An identical setup is needed for a Jetson Nano once you get ssh access to it (i.e. jumping straight to point `4` below). For instructions on how to setup these devices please refer to the "getting started guides" for [Jetson Nano](https://developer.nvidia.com/embedded/learn/get-started-jetson-nano-devkit#intro). - -1. **Install JetPack 5.1.2 on your Jetson device** - - - Download the JetPack 5.1.2 image from [NVIDIA-embedded](https://developer.nvidia.com/embedded/jetpack-sdk-512), note that you might need an NVIDIA developer account. You can find the download link under the `SD Card Image Method` section on NVIDIA's site. This image comes with Docker pre-installed as well as PyTorch+Torchvision and TensorFlow compiled with GPU support. + - After selecting your storage, click on `Next`. Then, you'll be asked if you want to edit the settings of the image you are about to flash. This allows you to setup a custom username and password as well as indicate to which WiFi network your device should connect to. In the screenshot you can see some dummy values. This tutorial doesn't make any assumptions on these values, set them according to your needs. + - Finally, complete the remaining steps to start flashing the chosen OS onto the uSD card. - - Extract the image (~18GB and named `sd-blob.img`) and flash it onto the uSD card using [balenaEtcher](https://www.balena.io/etcher/) (or equivalent). +2. **Preparations for your Flower experiments** -2. **Follow [the instructions](https://developer.nvidia.com/embedded/learn/get-started-jetson-xavier-nx-devkit) to set up the device.** The first time you boot your Xavier-NX you should plug it into a display to complete the installation process. 
After that, a display is no longer needed for this example but you could still use it instead of connecting to your device over ssh. + - SSH into your Rapsberry Pi. + - Follow the steps outlined in [Embedded Devices Setup](device_setup.md) to set it up for develpment. The objetive of this step is to have your Pi ready to join later as a Flower `SuperNode` to an existing federation. -3. **Setup Docker**: Docker comes pre-installed with the Ubuntu image provided by NVIDIA. But for convenience, we will create a new user group and add our user to it (with the idea of not having to use `sudo` for every command involving docker (e.g. `docker run`, `docker ps`, etc)). More details about what this entails can be found in the [Docker documentation](https://docs.docker.com/engine/install/linux-postinstall/). You can achieve this by doing: +3. Run your Flower experiments following the steps in the [Running FL with Flower](https://github.com/adap/flower/tree/main/examples/embedded-devices#running-fl-training-with-flower) section. - ```bash - sudo usermod -aG docker $USER - # apply changes to current shell (or logout/reboot) - newgrp docker - ``` +## Embedded Federated AI -4. **Update OS and install utilities.** Then, install some useful utilities: +For this demo, we'll be using [Fashion-MNIST](https://huggingface.co/datasets/zalando-datasets/fashion_mnist), a popular dataset for image classification comprised of 10 classes (e.g. boot, dress, trouser) and a total of 70K `28x28` greyscale images. The training set contains 60K images. - ```bash - sudo apt update && sudo apt upgrade -y - # now reboot - sudo reboot - ``` +> \[!TIP\] +> Refer to the [Flower Architecture](https://flower.ai/docs/framework/explanation-flower-architecture.html) page for an overview of the different components involved in a federation. 
As shown below.
When power consumption is not a limiting factor, we could use the highest 15W mode using all 6 CPU cores. On the other hand, if the devices are battery-powered we might want to make use of a low-power mode using 10W and 2 CPU cores. All the details regarding the different power modes of a Jetson Xavier-NX can be found [here](https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%2520Linux%2520Driver%2520Package%2520Development%2520Guide%2Fpower_management_jetson_xavier.html%23wwpID0E0NO0HA). For this demo, we'll be setting the device to high-performance mode: - - ```bash - sudo /usr/sbin/nvpmodel -m 2 # 15W with 6cpus @ 1.4GHz - ``` - - Jetson Stats (that you launch via `jtop`) also allows you to see and set the power mode on your device. Navigate to the `CTRL` panel and click on one of the `NVM modes` available. - -6. **Build base client image**. Before running a Flower client, we need to install `Flower` and other ML dependencies (i.e. Pytorch or Tensorflow). Instead of installing this manually via `pip3 install ...`, let's use the pre-built Docker images provided by NVIDIA. In this way, we can be confident that the ML infrastructure is optimized for these devices. Build your Flower client image with: - - ```bash - # On your Jetson's terminal run - ./build_jetson_flower_client.sh --pytorch # or --tensorflow - # Bear in mind this might take a few minutes since the base images need to be donwloaded (~7GB) and decompressed. - # To the above script pass the additional flag `--no-cache` to re-build the image. - ``` - - Once your script is finished, verify your `flower_client` Docker image is present. 
You will connect Flower `SuperNodes` to it in the next step.
-## Running Embedded FL with Flower +```shell +flower-superlink --insecure +``` -For this demo, we'll be using [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html), a popular dataset for image classification comprised of 10 classes (e.g. car, bird, airplane) and a total of 60K `32x32` RGB images. The training set contains 50K images. The server will automatically download the dataset should it not be found in `./data`. The clients do the same. The dataset is by default split into 50 partitions (each to be assigned to a different client). This can be controlled with the `NUM_CLIENTS` global variable in the client scripts. In this example, each device will play the role of a specific user (specified via `--cid` -- we'll show this later) and therefore only do local training with that portion of the data. For CIFAR-10, clients will be training a MobileNet-v2/3 model. +### Connecting Flower `SuperNodes` -You can run this example using MNIST and a smaller CNN model by passing flag `--mnist`. This is useful if you are using devices with a very limited amount of memory (e.g. RaspberryPi Zero) or if you want the training taking place on the embedded devices to be much faster (specially if these are CPU-only). The partitioning of the dataset is done in the same way. +With the `SuperLink` up and running, now let's launch a `SuperNode` on each embedded device. In order to do this ensure you know what the IP of the machine running the `SuperLink` is and that you have copied the data to the device. Note with `--node-config` we set a key named `dataset-path`. That's the one expected by the `client_fn()` in [client_app.py](embeddedexample/client_app.py). This file will be automatically delivered to the `SuperNode` so it knows how to execute the `ClientApp` logic. -### Start your Flower Server +> \[!NOTE\] +> You don't need to clone this example to your embedded devices running as Flower `SuperNodes`. 
The code they will execute (in [embeddedexample/client_app.py](embeddedexample/client_app.py)) will automatically be delivered.
we can now start the run.
diff --git a/examples/embedded-devices/_static/rpi_imager.png b/examples/embedded-devices/_static/rpi_imager.png index a59a3137334e..958290fc112f 100644 Binary files a/examples/embedded-devices/_static/rpi_imager.png and b/examples/embedded-devices/_static/rpi_imager.png differ diff --git a/examples/embedded-devices/_static/tmux_jtop_view.gif b/examples/embedded-devices/_static/tmux_jtop_view.gif deleted file mode 100644 index 7e92b586851a..000000000000 Binary files a/examples/embedded-devices/_static/tmux_jtop_view.gif and /dev/null differ diff --git a/examples/embedded-devices/build_jetson_flower_client.sh b/examples/embedded-devices/build_jetson_flower_client.sh deleted file mode 100755 index 32725a58f1f7..000000000000 --- a/examples/embedded-devices/build_jetson_flower_client.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -if [ -z "${CI}" ]; then - BUILDKIT=1 -else - BUILDKIT=0 -fi - -# This script build a docker image that's ready to run your flower client. -# Depending on your choice of ML framework (TF or PyTorch), the appropiate -# base image from NVIDIA will be pulled. This ensures you get the best -# performance out of your Jetson device. - -BASE_PYTORCH=nvcr.io/nvidia/l4t-pytorch:r35.1.0-pth1.13-py3 -BASE_TF=nvcr.io/nvidia/l4t-tensorflow:r35.3.1-tf2.11-py3 -EXTRA="" - -while [[ $# -gt 0 ]]; do - case $1 in - -p|--pytorch) - BASE_IMAGE=$BASE_PYTORCH - shift - ;; - -t|--tensorflow) - BASE_IMAGE=$BASE_TF - shift - ;; - -r|--no-cache) - EXTRA="--no-cache" - shift - ;; - -*|--*) - echo "Unknown option $1 (pass either --pytorch or --tensorflow)" - exit 1 - ;; - esac -done - -DOCKER_BUILDKIT=${BUILDKIT} docker build $EXTRA \ - --build-arg BASE_IMAGE=$BASE_IMAGE \ - . 
\ - -t flower_client:latest diff --git a/examples/embedded-devices/client_pytorch.py b/examples/embedded-devices/client_pytorch.py deleted file mode 100644 index 0fee7a854d67..000000000000 --- a/examples/embedded-devices/client_pytorch.py +++ /dev/null @@ -1,195 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import flwr as fl -import torch -import torch.nn as nn -import torch.nn.functional as F -from flwr_datasets import FederatedDataset -from torch.utils.data import DataLoader -from torchvision.models import mobilenet_v3_small -from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - -parser = argparse.ArgumentParser(description="Flower Embedded devices") -parser.add_argument( - "--server_address", - type=str, - default="0.0.0.0:8080", - help=f"gRPC server address (default '0.0.0.0:8080')", -) -parser.add_argument( - "--cid", - type=int, - required=True, - help="Client id. Should be an integer between 0 and NUM_CLIENTS", -) -parser.add_argument( - "--mnist", - action="store_true", - help="If you use Raspberry Pi Zero clients (which just have 512MB or RAM) use " - "MNIST", -) - -warnings.filterwarnings("ignore", category=UserWarning) -NUM_CLIENTS = 50 - - -class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz').""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 4 * 4, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 4 * 4) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - -def train(net, trainloader, optimizer, epochs, device): - """Train the model on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - for _ in 
range(epochs): - for batch in tqdm(trainloader): - batch = list(batch.values()) - images, labels = batch[0], batch[1] - optimizer.zero_grad() - criterion(net(images.to(device)), labels.to(device)).backward() - optimizer.step() - - -def test(net, testloader, device): - """Validate the model on the test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for batch in tqdm(testloader): - batch = list(batch.values()) - images, labels = batch[0], batch[1] - outputs = net(images.to(device)) - labels = labels.to(device) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy - - -def prepare_dataset(use_mnist: bool): - """Get MNIST/CIFAR-10 and return client partitions and global testset.""" - if use_mnist: - fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) - img_key = "image" - norm = Normalize((0.1307,), (0.3081,)) - else: - fds = FederatedDataset(dataset="cifar10", partitioners={"train": NUM_CLIENTS}) - img_key = "img" - norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - pytorch_transforms = Compose([ToTensor(), norm]) - - def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - batch[img_key] = [pytorch_transforms(img) for img in batch[img_key]] - return batch - - trainsets = [] - validsets = [] - for partition_id in range(NUM_CLIENTS): - partition = fds.load_partition(partition_id, "train") - # Divide data on each node: 90% train, 10% test - partition = partition.train_test_split(test_size=0.1, seed=42) - partition = partition.with_transform(apply_transforms) - trainsets.append(partition["train"]) - validsets.append(partition["test"]) - testset = fds.load_split("test") - testset = testset.with_transform(apply_transforms) - return trainsets, validsets, testset - - -# Flower client, adapted from Pytorch 
quickstart/simulation example -class FlowerClient(fl.client.NumPyClient): - """A FlowerClient that trains a MobileNetV3 model for CIFAR-10 or a much smaller CNN - for MNIST.""" - - def __init__(self, trainset, valset, use_mnist): - self.trainset = trainset - self.valset = valset - # Instantiate model - if use_mnist: - self.model = Net() - else: - self.model = mobilenet_v3_small(num_classes=10) - # Determine device - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) # send model to device - - def set_parameters(self, params): - """Set model weights from a list of NumPy ndarrays.""" - params_dict = zip(self.model.state_dict().keys(), params) - state_dict = OrderedDict( - { - k: torch.Tensor(v) if v.shape != torch.Size([]) else torch.Tensor([0]) - for k, v in params_dict - } - ) - self.model.load_state_dict(state_dict, strict=True) - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def fit(self, parameters, config): - print("Client sampled for fit()") - self.set_parameters(parameters) - # Read hyperparameters from config set by the server - batch, epochs = config["batch_size"], config["epochs"] - # Construct dataloader - trainloader = DataLoader(self.trainset, batch_size=batch, shuffle=True) - # Define optimizer - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9) - # Train - train(self.model, trainloader, optimizer, epochs=epochs, device=self.device) - # Return local model and statistics - return self.get_parameters({}), len(trainloader.dataset), {} - - def evaluate(self, parameters, config): - print("Client sampled for evaluate()") - self.set_parameters(parameters) - # Construct dataloader - valloader = DataLoader(self.valset, batch_size=64) - # Evaluate - loss, accuracy = test(self.model, valloader, device=self.device) - # Return statistics - return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} - - 
-def main(): - args = parser.parse_args() - print(args) - - assert args.cid < NUM_CLIENTS - - use_mnist = args.mnist - # Download dataset and partition it - trainsets, valsets, _ = prepare_dataset(use_mnist) - - # Start Flower client setting its associated data partition - fl.client.start_client( - server_address=args.server_address, - client=FlowerClient( - trainset=trainsets[args.cid], valset=valsets[args.cid], use_mnist=use_mnist - ).to_client(), - ) - - -if __name__ == "__main__": - main() diff --git a/examples/embedded-devices/client_tf.py b/examples/embedded-devices/client_tf.py deleted file mode 100644 index 524404b3ef8b..000000000000 --- a/examples/embedded-devices/client_tf.py +++ /dev/null @@ -1,134 +0,0 @@ -import argparse -import math -import warnings - -import flwr as fl -import tensorflow as tf -from flwr_datasets import FederatedDataset -from tensorflow import keras as keras - -parser = argparse.ArgumentParser(description="Flower Embedded devices") -parser.add_argument( - "--server_address", - type=str, - default="0.0.0.0:8080", - help=f"gRPC server address (deafault '0.0.0.0:8080')", -) -parser.add_argument( - "--cid", - type=int, - required=True, - help="Client id. 
Should be an integer between 0 and NUM_CLIENTS", -) -parser.add_argument( - "--mnist", - action="store_true", - help="If you use Raspberry Pi Zero clients (which just have 512MB or RAM) use MNIST", -) - -warnings.filterwarnings("ignore", category=UserWarning) -NUM_CLIENTS = 50 - - -def prepare_dataset(use_mnist: bool): - """Download and partitions the CIFAR-10/MNIST dataset.""" - if use_mnist: - fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) - img_key = "image" - else: - fds = FederatedDataset(dataset="cifar10", partitioners={"train": NUM_CLIENTS}) - img_key = "img" - partitions = [] - for partition_id in range(NUM_CLIENTS): - partition = fds.load_partition(partition_id, "train") - partition.set_format("numpy") - # Divide data on each node: 90% train, 10% test - partition = partition.train_test_split(test_size=0.1, seed=42) - x_train, y_train = ( - partition["train"][img_key] / 255.0, - partition["train"]["label"], - ) - x_test, y_test = partition["test"][img_key] / 255.0, partition["test"]["label"] - partitions.append(((x_train, y_train), (x_test, y_test))) - data_centralized = fds.load_split("test") - data_centralized.set_format("numpy") - x_centralized = data_centralized[img_key] / 255.0 - y_centralized = data_centralized["label"] - return partitions, (x_centralized, y_centralized) - - -class FlowerClient(fl.client.NumPyClient): - """A FlowerClient that uses MobileNetV3 for CIFAR-10 or a much smaller CNN for - MNIST.""" - - def __init__(self, trainset, valset, use_mnist: bool): - self.x_train, self.y_train = trainset - self.x_val, self.y_val = valset - # Instantiate model - if use_mnist: - # small model for MNIST - self.model = keras.Sequential( - [ - keras.Input(shape=(28, 28, 1)), - keras.layers.Conv2D(32, kernel_size=(5, 5), activation="relu"), - keras.layers.MaxPooling2D(pool_size=(2, 2)), - keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - keras.layers.MaxPooling2D(pool_size=(2, 2)), - keras.layers.Flatten(), - 
keras.layers.Dropout(0.5), - keras.layers.Dense(10, activation="softmax"), - ] - ) - else: - # let's use a larger model for cifar - self.model = tf.keras.applications.MobileNetV3Small( - (32, 32, 3), classes=10, weights=None - ) - self.model.compile( - "adam", "sparse_categorical_crossentropy", metrics=["accuracy"] - ) - - def get_parameters(self, config): - return self.model.get_weights() - - def set_parameters(self, params): - self.model.set_weights(params) - - def fit(self, parameters, config): - print("Client sampled for fit()") - self.set_parameters(parameters) - # Set hyperparameters from config sent by server/strategy - batch, epochs = config["batch_size"], config["epochs"] - # train - self.model.fit(self.x_train, self.y_train, epochs=epochs, batch_size=batch) - return self.get_parameters({}), len(self.x_train), {} - - def evaluate(self, parameters, config): - print("Client sampled for evaluate()") - self.set_parameters(parameters) - loss, accuracy = self.model.evaluate(self.x_val, self.y_val) - return loss, len(self.x_val), {"accuracy": accuracy} - - -def main(): - args = parser.parse_args() - print(args) - - assert args.cid < NUM_CLIENTS - - use_mnist = args.mnist - # Download dataset and partition it - partitions, _ = prepare_dataset(use_mnist) - trainset, valset = partitions[args.cid] - - # Start Flower client setting its associated data partition - fl.client.start_client( - server_address=args.server_address, - client=FlowerClient( - trainset=trainset, valset=valset, use_mnist=use_mnist - ).to_client(), - ) - - -if __name__ == "__main__": - main() diff --git a/examples/embedded-devices/device_setup.md b/examples/embedded-devices/device_setup.md new file mode 100644 index 000000000000..642ad4c0f93c --- /dev/null +++ b/examples/embedded-devices/device_setup.md @@ -0,0 +1,94 @@ +# Setting up your Embedded Device + +> \[!NOTE\] +> This guide is applicable to many embedded devices such as Raspberry Pi. 
This guide assumes you have a fresh install of Raspberry Pi OS Lite or Ubuntu Server (e.g. 22.04) and that you have successfully `ssh`-ed into your device. + +## Setting up your device for Python developemnet + +We are going to use [`pyenv`](https://github.com/pyenv/pyenv) to manage different Python versions and to create an environment. First, we need to install some system dependencies + +```shell +sudo apt-get update +# Install python deps relevant for this and other examples +sudo apt-get install build-essential zlib1g-dev libssl-dev \ + libsqlite3-dev libreadline-dev libbz2-dev \ + git libffi-dev liblzma-dev libsndfile1 -y + +# Install some good to have +sudo apt-get install htop tmux -y + +# Add mouse support for tmux +echo "set-option -g mouse on" >> ~/.tmux.conf +``` + +It is recommended to work on virtual environments instead of in the global Python environment. Let's install `pyenv` with the `virtualenv` plugin. + +### Install `pyenv` and `virtualenv` plugin + +```shell +git clone https://github.com/pyenv/pyenv.git ~/.pyenv +echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc +echo 'command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc +echo 'eval "$(pyenv init -)"' >> ~/.bashrc + +# Now reload .bashrc +source ~/.bashrc + +# Install pyenv virtual env plugin +git clone https://github.com/pyenv/pyenv-virtualenv.git $(pyenv root)/plugins/pyenv-virtualenv +# Restart your shell +exec "$SHELL" +``` + +## Create a Python environment and activate it + +> \[!TIP\] +> If you are using a Raspberry Pi Zero 2 or another embedded device with a small amount of RAM (e.g. \<1GB), you probably need to extend the size of the SWAP partition. See the guide at the end of this readme. + +Now all is ready to create a virtualenvironment. 
But first, let's install a recent version of Python: + +```shell +# Install python 3.10+ +pyenv install 3.10.14 + +# Then create a virtual environment +pyenv virtualenv 3.10.14 my-env +``` + +Finally, activate your environment and install the dependencies for your project: + +```shell +# Activate your environment +pyenv activate my-env + +# Then, install flower +pip install flwr + +# Install any other dependency needed for your device +# Likely your embedded device will run a Flower SuperNode +# This means you'll likely want to install dependencies that +# your Flower `ClientApp` needs. + +pip install +``` + +## Extening SWAP for `RPi Zero 2` + +> \[!NOTE\] +> This mini-guide is useful if your RPi Zero 2 cannot complete installing some packages (e.g. TensorFlow or even Python) or do some processing due to its limited RAM. + +A workaround is to create a `swap` disk partition (non-existant by default) so the OS can offload some elements to disk. I followed the steps described [in this blogpost](https://www.digitalocean.com/community/tutorials/how-to-add-swap-space-on-ubuntu-20-04) that I copy below. You can follow these steps if you often see your RPi Zero running out of memory: + +```shell +# Let's create a 1GB swap partition +sudo fallocate -l 1G /swapfile +sudo chmod 600 /swapfile +sudo mkswap /swapfile +# Enable swap +sudo swapon /swapfile # you should now be able to see the swap size on htop. +# make changes permanent after reboot +sudo cp /etc/fstab /etc/fstab.bak +echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab +``` + +Please note using swap as if it was RAM comes with a large penalty in terms of data movement. 
diff --git a/examples/embedded-devices/embeddedexample/__init__.py b/examples/embedded-devices/embeddedexample/__init__.py new file mode 100644 index 000000000000..d70d6aaf8d39 --- /dev/null +++ b/examples/embedded-devices/embeddedexample/__init__.py @@ -0,0 +1 @@ +"""embeddedexample: A Flower / PyTorch app.""" diff --git a/examples/embedded-devices/embeddedexample/client_app.py b/examples/embedded-devices/embeddedexample/client_app.py new file mode 100644 index 000000000000..442e16b4cb3b --- /dev/null +++ b/examples/embedded-devices/embeddedexample/client_app.py @@ -0,0 +1,64 @@ +"""embeddedexample: A Flower / PyTorch app.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from embeddedexample.task import ( + Net, + get_weights, + load_data_from_disk, + set_weights, + test, + train, +) + + +# Define Flower Client +class FlowerClient(NumPyClient): + def __init__(self, trainloader, valloader, local_epochs, learning_rate): + self.net = Net() + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.lr = learning_rate + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.lr, + self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to know where dataset is located + dataset_path = 
context.node_config["dataset-path"] + + # Read run_config to fetch hyperparameters relevant to this run + batch_size = context.run_config["batch-size"] + trainloader, valloader = load_data_from_disk(dataset_path, batch_size) + local_epochs = context.run_config["local-epochs"] + learning_rate = context.run_config["learning-rate"] + + # Return Client instance + return FlowerClient(trainloader, valloader, local_epochs, learning_rate).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/embedded-devices/embeddedexample/server_app.py b/examples/embedded-devices/embeddedexample/server_app.py new file mode 100644 index 000000000000..59ec72bebbfa --- /dev/null +++ b/examples/embedded-devices/embeddedexample/server_app.py @@ -0,0 +1,46 @@ +"""embeddedexample: A Flower / PyTorch app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from embeddedexample.task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define the strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=context.run_config["fraction-evaluate"], + min_available_clients=2, + 
evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/embedded-devices/embeddedexample/task.py b/examples/embedded-devices/embeddedexample/task.py new file mode 100644 index 000000000000..f08441c0426a --- /dev/null +++ b/examples/embedded-devices/embeddedexample/task.py @@ -0,0 +1,98 @@ +"""embeddedexample: A Flower / PyTorch app.""" + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from datasets import load_from_disk +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 4 * 4, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 4 * 4) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def load_data_from_disk(path: str, batch_size: int): + """Load a dataset in Huggingface format from disk and creates dataloaders.""" + partition_train_test = load_from_disk(path) + pytorch_transforms = Compose([ToTensor(), Normalize((0.5,), (0.5,))]) + + def apply_transforms(batch): + """Apply 
transforms to the partition from FederatedDataset.""" + batch["image"] = [pytorch_transforms(img) for img in batch["image"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader( + partition_train_test["train"], batch_size=batch_size, shuffle=True + ) + testloader = DataLoader(partition_train_test["test"], batch_size=batch_size) + return trainloader, testloader + + +def train(net, trainloader, valloader, epochs, learning_rate, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) + net.train() + for _ in range(epochs): + for batch in trainloader: + images = batch["image"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(device)), labels.to(device)).backward() + optimizer.step() + + val_loss, val_acc = test(net, valloader, device) + + results = { + "val_loss": val_loss, + "val_accuracy": val_acc, + } + return results + + +def test(net, testloader, device): + """Validate the model on the test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy diff --git a/examples/embedded-devices/generate_dataset.py b/examples/embedded-devices/generate_dataset.py new file mode 100644 index 000000000000..e1ab30ad31da --- /dev/null +++ b/examples/embedded-devices/generate_dataset.py @@ -0,0 +1,47 @@ +import argparse +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + 
+DATASET_DIRECTORY = "datasets" + + +def save_dataset_to_disk(num_partitions: int): + """This function downloads the Fashion-MNIST dataset and generates N partitions. + + Each will be saved into the DATASET_DIRECTORY. + """ + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="zalando-datasets/fashion_mnist", + partitioners={"train": partitioner}, + ) + + for partition_id in range(num_partitions): + partition = fds.load_partition(partition_id) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + file_path = f"./{DATASET_DIRECTORY}/fashionmnist_part_{partition_id + 1}" + partition_train_test.save_to_disk(file_path) + print(f"Written: {file_path}") + + +if __name__ == "__main__": + # Initialize argument parser + parser = argparse.ArgumentParser( + description="Save Fashion-MNIST dataset partitions to disk" + ) + + # Add an optional positional argument for number of partitions + parser.add_argument( + "--num-supernodes", + type=int, + nargs="?", + default=2, + help="Number of partitions to create (default: 2)", + ) + + # Parse the arguments + args = parser.parse_args() + + # Call the function with the provided argument + save_dataset_to_disk(args.num_supernodes) diff --git a/examples/embedded-devices/pyproject.toml b/examples/embedded-devices/pyproject.toml new file mode 100644 index 000000000000..f7354a4e95d2 --- /dev/null +++ b/examples/embedded-devices/pyproject.toml @@ -0,0 +1,45 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "embeddedexample" +version = "1.0.0" +description = "Federated AI with Embedded Devices using Flower" +license = "Apache-2.0" +dependencies = [ + "flwr>=1.13.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build] +exclude = [ + "datasets/*", # Exclude datasets from FAB (if generated in this directory) + "_static/*", # Exclude images in README from FAB +] + 
+[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "embeddedexample.server_app:app" +clientapp = "embeddedexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-evaluate = 0.5 +local-epochs = 1 +learning-rate = 0.1 +batch-size = 32 + +[tool.flwr.federations] +default = "embedded-federation" + +[tool.flwr.federations.embedded-federation] +address = "49.12.200.204:9093" +insecure = true diff --git a/examples/embedded-devices/requirements_pytorch.txt b/examples/embedded-devices/requirements_pytorch.txt deleted file mode 100644 index dbad686d914e..000000000000 --- a/examples/embedded-devices/requirements_pytorch.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -torch==1.13.1 -torchvision==0.14.1 -tqdm==4.66.3 diff --git a/examples/embedded-devices/requirements_tf.txt b/examples/embedded-devices/requirements_tf.txt deleted file mode 100644 index ff65b9c31648..000000000000 --- a/examples/embedded-devices/requirements_tf.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow >=2.9.1, != 2.11.1 diff --git a/examples/embedded-devices/server.py b/examples/embedded-devices/server.py deleted file mode 100644 index 49c72720f02a..000000000000 --- a/examples/embedded-devices/server.py +++ /dev/null @@ -1,79 +0,0 @@ -import argparse -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - -parser = argparse.ArgumentParser(description="Flower Embedded devices") -parser.add_argument( - "--server_address", - type=str, - default="0.0.0.0:8080", - help=f"gRPC server address (deafault '0.0.0.0:8080')", -) -parser.add_argument( - "--rounds", - type=int, - default=5, - help="Number of rounds of federated learning (default: 5)", -) -parser.add_argument( - "--sample_fraction", - type=float, - default=1.0, - help="Fraction of available clients used for 
fit/evaluate (default: 1.0)", -) -parser.add_argument( - "--min_num_clients", - type=int, - default=2, - help="Minimum number of available clients required for sampling (default: 2)", -) - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """This function averages teh `accuracy` metric sent by the clients in a `evaluate` - stage (i.e. clients received the global model and evaluate it on their local - validation sets).""" - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def fit_config(server_round: int): - """Return a configuration with static batch size and (local) epochs.""" - config = { - "epochs": 3, # Number of local epochs done by clients - "batch_size": 16, # Batch size to use by clients during fit() - } - return config - - -def main(): - args = parser.parse_args() - - print(args) - - # Define strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - fraction_evaluate=args.sample_fraction, - min_fit_clients=args.min_num_clients, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=weighted_average, - ) - - # Start Flower server - fl.server.start_server( - server_address=args.server_address, - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/federated-kaplan-meier-fitter/pyproject.toml b/examples/federated-kaplan-meier-fitter/pyproject.toml index 45cb12d8515c..6ea71a1878bb 100644 --- a/examples/federated-kaplan-meier-fitter/pyproject.toml +++ b/examples/federated-kaplan-meier-fitter/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Kaplan Meier Fitter with Flower" license = "Apache-2.0" 
dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets>=0.3.0", "numpy>=1.23.2", "pandas>=2.0.0", diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml index ccbc56bfd1a7..6ebf06473f90 100644 --- a/examples/fl-dp-sa/pyproject.toml +++ b/examples/fl-dp-sa/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Central Differential Privacy and Secure Aggregation in Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/fl-tabular/pyproject.toml b/examples/fl-tabular/pyproject.toml index 058a8d73b45f..a87e0fb860f0 100644 --- a/examples/fl-tabular/pyproject.toml +++ b/examples/fl-tabular/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Adult Census Income Tabular Dataset and Federated Learning in Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets>=0.3.0", "torch==2.1.1", "scikit-learn==1.5.0", diff --git a/examples/flower-authentication/.gitignore b/examples/flower-authentication/.gitignore new file mode 100644 index 000000000000..24e9257bad04 --- /dev/null +++ b/examples/flower-authentication/.gitignore @@ -0,0 +1,2 @@ +keys/ +certificates/ diff --git a/examples/flower-authentication/README.md b/examples/flower-authentication/README.md index d10780eeae5d..323362060c5a 100644 --- a/examples/flower-authentication/README.md +++ b/examples/flower-authentication/README.md @@ -4,74 +4,75 @@ dataset: [CIFAR-10] framework: [torch, torchvision] --- -# Flower Authentication with PyTorch 🧪 +# Flower Federations with Authentication 🧪 -> 🧪 = This example covers experimental features that might change in future versions of Flower -> Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), 
[advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. +> \[!NOTE\] +> 🧪 = This example covers experimental features that might change in future versions of Flower. +> Please consult the regular PyTorch examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. -The following steps describe how to start a long-running Flower server (SuperLink) and a long-running Flower client (SuperNode) with authentication enabled. +The following steps describe how to start a long-running Flower server (SuperLink) and a long-running Flower clients (SuperNode) with authentication enabled. The task is to train a simple CNN for image classification using PyTorch. ## Project Setup Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: ```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/flower-authentication . && rm -rf _tmp && cd flower-authentication +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flower-authentication . \ + && rm -rf _tmp && cd flower-authentication ``` This will create a new directory called `flower-authentication` with the following project structure: -```bash -$ tree . -. 
-├── certificate.conf # <-- configuration for OpenSSL -├── generate.sh # <-- generate certificates and keys -├── pyproject.toml # <-- project dependencies -├── client.py # <-- contains `ClientApp` -├── server.py # <-- contains `ServerApp` -└── task.py # <-- task-specific code (model, data) +```shell +flower-authentication +├── authexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +├── certificate.conf # Configuration for OpenSSL +├── generate.sh # Generate certificates and keys +├── prepare_dataset.py # Generate datasets for each SuperNode to use +└── README.md ``` -## Install dependencies +### Install dependencies and project -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +Install the dependencies defined in `pyproject.toml` as well as the `authexample` package. -```shell -# From a new python environment, run: -pip install . +```bash +pip install -e . ``` -Then, to verify that everything works correctly you can run the following command: - -```shell -python3 -c "import flwr" -``` +## Generate public and private keys -If you don't see any errors you're good to go! +The `generate.sh` script by default generates certificates for creating a secure TLS connection +and three private and public key pairs for one server and two clients. -## Generate public and private keys +> \[!NOTE\] +> Note that this script should only be used for development purposes and not for creating production key pairs. ```bash ./generate.sh ``` -`generate.sh` is a script that (by default) generates certificates for creating a secure TLS connection -and three private and public key pairs for one server and two clients. 
You can generate more keys by specifying the number of client credentials that you wish to generate. The script also generates a CSV file that includes each of the generated (client) public keys. -⚠️ Note that this script should only be used for development purposes and not for creating production key pairs. - ```bash ./generate.sh {your_number_of_clients} ``` ## Start the long-running Flower server (SuperLink) -To start a long-running Flower server (SuperLink) and enable authentication is very easy; all you need to do is type +Starting long-running Flower server component (SuperLink) and enable authentication is very easy; all you need to do is type `--auth-list-public-keys` containing file path to the known `client_public_keys.csv`, `--auth-superlink-private-key` containing file path to the SuperLink's private key `server_credentials`, and `--auth-superlink-public-key` containing file path to the SuperLink's public key `server_credentials.pub`. Notice that you can only enable authentication with a secure TLS connection. +Let's first launch the `SuperLink`: + ```bash flower-superlink \ --ssl-ca-certfile certificates/ca.crt \ @@ -82,35 +83,56 @@ flower-superlink \ --auth-superlink-public-key keys/server_credentials.pub ``` +At this point your server-side is idling. Next, let's connect two `SuperNode`s, and then we'll start a run. + ## Start the long-running Flower client (SuperNode) +> \[!NOTE\] +> Typically each `SuperNode` runs in a different entity/organization which has access to a dataset. In this example we are going to artificially create N dataset splits and saved them into a new directory called `datasets/`. Then, each `SuperNode` will be pointed to the dataset it should load via the `--node-config` argument. We provide a script that does the download, partition and saving of CIFAR-10. 
+ +```bash +python prepare_dataset.py +``` + In a new terminal window, start the first long-running Flower client (SuperNode): ```bash -flower-client-app client:app \ +flower-supernode \ --root-certificates certificates/ca.crt \ - --server 127.0.0.1:9092 \ --auth-supernode-private-key keys/client_credentials_1 \ - --auth-supernode-public-key keys/client_credentials_1.pub + --auth-supernode-public-key keys/client_credentials_1.pub \ + --node-config 'dataset-path="datasets/cifar10_part_1"' \ + --clientappio-api-address="0.0.0.0:9094" ``` In yet another new terminal window, start the second long-running Flower client: ```bash -flower-client-app client:app \ +flower-supernode \ --root-certificates certificates/ca.crt \ - --server 127.0.0.1:9092 \ --auth-supernode-private-key keys/client_credentials_2 \ - --auth-supernode-public-key keys/client_credentials_2.pub + --auth-supernode-public-key keys/client_credentials_2.pub \ + --node-config 'dataset-path="datasets/cifar10_part_2"' \ + --clientappio-api-address="0.0.0.0:9095" ``` If you generated more than 2 client credentials, you can add more clients by opening new terminal windows and running the command above. Don't forget to specify the correct client private and public keys for each client instance you created. +> \[!TIP\] +> Note the `--node-config` passed when spawning the `SuperNode` is accessible to the `ClientApp` via the context. In this example, the `client_fn()` uses it to load the dataset and then proceed with the training of the model. +> +> ```python +> def client_fn(context: Context): +> # retrieve the passed `--node-config` +> dataset_path = context.node_config["dataset-path"] +> # then load the dataset +> ``` + ## Run the Flower App -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower ServerApp: +With both the long-running server (SuperLink) and two SuperNodes up and running, we can now start the run. 
Note that the command below points to a federation named `my-federation`. Its entry point is defined in the `pyproject.toml`. ```bash -flower-server-app server:app --root-certificates certificates/ca.crt --dir ./ --server 127.0.0.1:9091 +flwr run . my-federation ``` diff --git a/examples/flower-authentication/authexample/__init__.py b/examples/flower-authentication/authexample/__init__.py new file mode 100644 index 000000000000..17ebe97e1433 --- /dev/null +++ b/examples/flower-authentication/authexample/__init__.py @@ -0,0 +1 @@ +"""authexample.""" diff --git a/examples/flower-authentication/authexample/client_app.py b/examples/flower-authentication/authexample/client_app.py new file mode 100644 index 000000000000..d768dbdcbb67 --- /dev/null +++ b/examples/flower-authentication/authexample/client_app.py @@ -0,0 +1,65 @@ +"""authexample: An authenticated Flower / PyTorch app.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from authexample.task import ( + Net, + get_weights, + load_data_from_disk, + set_weights, + test, + train, +) + + +# Define Flower Client +class FlowerClient(NumPyClient): + def __init__(self, trainloader, valloader, local_epochs, learning_rate): + self.net = Net() + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.lr = learning_rate + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.lr, + self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, 
len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to get the path to the dataset the SuperNode running + # this ClientApp has access to + dataset_path = context.node_config["dataset-path"] + + # Read run_config to fetch hyperparameters relevant to this run + batch_size = context.run_config["batch-size"] + trainloader, valloader = load_data_from_disk(dataset_path, batch_size) + local_epochs = context.run_config["local-epochs"] + learning_rate = context.run_config["learning-rate"] + + # Return Client instance + return FlowerClient(trainloader, valloader, local_epochs, learning_rate).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/flower-authentication/authexample/server_app.py b/examples/flower-authentication/authexample/server_app.py new file mode 100644 index 000000000000..f79bf308a34c --- /dev/null +++ b/examples/flower-authentication/authexample/server_app.py @@ -0,0 +1,46 @@ +"""authexample: An authenticated Flower / PyTorch app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from authexample.task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize 
model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define the strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=context.run_config["fraction-evaluate"], + min_available_clients=2, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/flower-authentication/task.py b/examples/flower-authentication/authexample/task.py similarity index 53% rename from examples/flower-authentication/task.py rename to examples/flower-authentication/authexample/task.py index 331bd324061d..88a492ecfa26 100644 --- a/examples/flower-authentication/task.py +++ b/examples/flower-authentication/authexample/task.py @@ -1,22 +1,19 @@ -import warnings +"""authexample: An authenticated Flower / PyTorch app.""" + from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 +from datasets import load_from_disk from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class Net(nn.Module): """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - def __init__(self) -> None: + def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) @@ -25,7 +22,7 @@ def __init__(self) -> None: self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) - def forward(self, x: torch.Tensor) -> torch.Tensor: + def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) @@ -34,61 +31,69 @@ def forward(self, x: 
torch.Tensor) -> torch.Tensor: return self.fc3(x) -def train(net, trainloader, valloader, epochs, device): +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def load_data_from_disk(path: str, batch_size: int): + partition_train_test = load_from_disk(path) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader( + partition_train_test["train"], batch_size=batch_size, shuffle=True + ) + testloader = DataLoader(partition_train_test["test"], batch_size=batch_size) + return trainloader, testloader + + +def train(net, trainloader, valloader, epochs, learning_rate, device): """Train the model on the training set.""" - print("Starting training...") net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) net.train() for _ in range(epochs): - for images, labels in trainloader: - images, labels = images.to(device), labels.to(device) + for batch in trainloader: + images = batch["img"] + labels = batch["label"] optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() + criterion(net(images.to(device)), labels.to(device)).backward() optimizer.step() - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) + val_loss, val_acc = test(net, valloader, 
device) results = { - "train_loss": train_loss, - "train_accuracy": train_acc, "val_loss": val_loss, "val_accuracy": val_acc, } return results -def test(net, testloader): +def test(net, testloader, device): """Validate the model on the test set.""" - net.to(DEVICE) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): - for images, labels in tqdm(testloader): - outputs = net(images.to(DEVICE)) - labels = labels.to(DEVICE) + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) return loss, accuracy - - -def load_data(): - """Load CIFAR-10 (training and test set).""" - trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - trainset = CIFAR10("./data", train=True, download=True, transform=trf) - testset = CIFAR10("./data", train=False, download=True, transform=trf) - return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) - - -def get_parameters(net): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - -def set_parameters(net, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) diff --git a/examples/flower-authentication/certificate.conf b/examples/flower-authentication/certificate.conf index ea97fcbb700d..04a2ed388174 100644 --- a/examples/flower-authentication/certificate.conf +++ b/examples/flower-authentication/certificate.conf @@ -18,3 +18,4 @@ subjectAltName = @alt_names DNS.1 = localhost IP.1 = ::1 IP.2 = 127.0.0.1 +IP.3 = 0.0.0.0 diff --git a/examples/flower-authentication/client.py b/examples/flower-authentication/client.py deleted file mode 100644 index 065acefb7bed..000000000000 --- 
a/examples/flower-authentication/client.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Dict - -from flwr.client import ClientApp, NumPyClient -from flwr.common import NDArrays, Scalar - -from task import DEVICE, Net, get_parameters, load_data, set_parameters, test, train - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) -trainloader, testloader = load_data() - - -# Define Flower client and client_fn -class FlowerClient(NumPyClient): - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: - return get_parameters(net) - - def fit(self, parameters, config): - set_parameters(net, parameters) - results = train(net, trainloader, testloader, epochs=1, device=DEVICE) - return get_parameters(net), len(trainloader.dataset), results - - def evaluate(self, parameters, config): - set_parameters(net, parameters) - loss, accuracy = test(net, testloader) - return loss, len(testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - return FlowerClient().to_client() - - -app = ClientApp( - client_fn=client_fn, -) diff --git a/examples/flower-authentication/prepare_dataset.py b/examples/flower-authentication/prepare_dataset.py new file mode 100644 index 000000000000..184eb5cf4104 --- /dev/null +++ b/examples/flower-authentication/prepare_dataset.py @@ -0,0 +1,47 @@ +import argparse +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +DATASET_DIRECTORY = "datasets" + + +def save_dataset_to_disk(num_partitions: int): + """This function downloads the CIFAR-10 dataset and generates N partitions. + + Each will be saved into the DATASET_DIRECTORY. 
+ """ + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + + for partition_id in range(num_partitions): + partition = fds.load_partition(partition_id) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + partition_train_test.save_to_disk( + f"./{DATASET_DIRECTORY}/cifar10_part_{partition_id + 1}" + ) + + +if __name__ == "__main__": + # Initialize argument parser + parser = argparse.ArgumentParser( + description="Save CIFAR-10 dataset partitions to disk" + ) + + # Add an optional positional argument for number of partitions + parser.add_argument( + "num_partitions", + type=int, + nargs="?", + default=2, + help="Number of partitions to create (default: 2)", + ) + + # Parse the arguments + args = parser.parse_args() + + # Call the function with the provided argument + save_dataset_to_disk(args.num_partitions) diff --git a/examples/flower-authentication/pyproject.toml b/examples/flower-authentication/pyproject.toml index 575d1e6618f5..963fb2af3564 100644 --- a/examples/flower-authentication/pyproject.toml +++ b/examples/flower-authentication/pyproject.toml @@ -3,16 +3,37 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "flower-client-authentication" -version = "0.1.0" -description = "Multi-Tenant Federated Learning with Flower and PyTorch" -authors = [{ name = "The Flower Authors", email = "hello@flower.ai" }] +name = "authexample" +version = "1.0.0" +description = "Federated Learning with PyTorch and authenticated Flower " +license = "Apache-2.0" dependencies = [ - "flwr-nightly[rest,simulation]", - "torch==1.13.1", - "torchvision==0.14.1", - "tqdm==4.66.3", + "flwr>=1.13.1", + "flwr-datasets[vision]>=0.4.0", + "torch>=2.5.0,<3.0.0", + "torchvision>=0.20.1,<0.21.0", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = 
"authexample.server_app:app" +clientapp = "authexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-evaluate = 0.5 +local-epochs = 1 +learning-rate = 0.1 +batch-size = 32 + +[tool.flwr.federations] +default = "my-federation" + +[tool.flwr.federations.my-federation] +address = "127.0.0.1:9093" # Address of the Exec API +root-certificates = "certificates/ca.crt" diff --git a/examples/flower-authentication/server.py b/examples/flower-authentication/server.py deleted file mode 100644 index 44908a0d9fc4..000000000000 --- a/examples/flower-authentication/server.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics -from flwr.server import ServerApp -from flwr.server.strategy.fedavg import FedAvg - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - examples = [num_examples for num_examples, _ in metrics] - - # Multiply accuracy of each client by number of examples used - train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - train_accuracies = [ - num_examples * m["train_accuracy"] for num_examples, m in metrics - ] - val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] - val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] - - # Aggregate and return custom metric (weighted average) - return { - "train_loss": sum(train_losses) / sum(examples), - "train_accuracy": sum(train_accuracies) / sum(examples), - "val_loss": sum(val_losses) / sum(examples), - "val_accuracy": sum(val_accuracies) / sum(examples), - } - - -# Define strategy -strategy = FedAvg( - fraction_fit=1.0, # Select all available clients - fraction_evaluate=0.0, # Disable evaluation - min_available_clients=2, - fit_metrics_aggregation_fn=weighted_average, -) - - -app = ServerApp( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git 
a/examples/flower-in-30-minutes/tutorial.ipynb b/examples/flower-in-30-minutes/tutorial.ipynb index ed8d9a49dcd7..a16b852848ab 100644 --- a/examples/flower-in-30-minutes/tutorial.ipynb +++ b/examples/flower-in-30-minutes/tutorial.ipynb @@ -7,7 +7,7 @@ "source": [ "Welcome to the 30 minutes Flower federated learning tutorial!\n", "\n", - "In this tutorial you will implement your first Federated Learning project using [Flower](https://flower.ai/).\n", + "In this tutorial you will implement your first Federated Learning project using [Flower](https://flower.ai/). You can find a similar tutorial, but using the preferred `flwr run` CLI command to launch your experiments in [the Flower Documentation](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html).\n", "\n", "🧑‍🏫 This tutorial starts at zero and expects no familiarity with federated learning. Only a basic understanding of data science and Python programming is assumed. A minimal understanding of ML is not required but if you already know about it, nothing is stopping your from modifying this code as you see fit!\n", "\n", @@ -18,18 +18,6 @@ "Let's get started!" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Complementary Content\n", - "\n", - "But before do so, let me point you to a few video tutorials in the [Flower Youtube channel](https://www.youtube.com/@flowerlabs) that you might want to check out after this tutorial. 
We post new videos fairly regularly with new content:\n", - "* **[VIDEO]** quickstart-tensorflow: [15-min video on how to start with Flower + Tensorflow/Keras](https://www.youtube.com/watch?v=FGTc2TQq7VM)\n", - "* **[VIDEO]** quickstart-pytorch: [20-min video on how to start with Flower + PyTorch](https://www.youtube.com/watch?v=jOmmuzMIQ4c)\n", - "* **[VIDEO]** Flower simulation mini-series: [9 line-by-line video tutorials](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)" - ] - }, { "attachments": {}, "cell_type": "markdown", @@ -371,26 +359,28 @@ "metadata": {}, "outputs": [], "source": [ - "def train(net, trainloader, optimizer, epochs):\n", + "def train(net, trainloader, optimizer, device=\"cpu\"):\n", " \"\"\"Train the network on the training set.\"\"\"\n", " criterion = torch.nn.CrossEntropyLoss()\n", + " net.to(device)\n", " net.train()\n", " for batch in trainloader:\n", - " images, labels = batch[\"image\"], batch[\"label\"]\n", + " images, labels = batch[\"image\"].to(device), batch[\"label\"].to(device)\n", " optimizer.zero_grad()\n", " loss = criterion(net(images), labels)\n", " loss.backward()\n", " optimizer.step()\n", "\n", "\n", - "def test(net, testloader):\n", + "def test(net, testloader, device):\n", " \"\"\"Validate the network on the entire test set.\"\"\"\n", " criterion = torch.nn.CrossEntropyLoss()\n", " correct, loss = 0, 0.0\n", + " net.to(device)\n", " net.eval()\n", " with torch.no_grad():\n", " for batch in testloader:\n", - " images, labels = batch[\"image\"], batch[\"label\"]\n", + " images, labels = batch[\"image\"].to(device), batch[\"label\"].to(device)\n", " outputs = net(images)\n", " loss += criterion(outputs, labels).item()\n", " _, predicted = torch.max(outputs.data, 1)\n", @@ -407,16 +397,20 @@ " # instantiate the model\n", " model = Net(num_classes=10)\n", "\n", + " # Discover device\n", + " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", + " 
model.to(device)\n", + "\n", " # define optimiser with hyperparameters supplied\n", " optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)\n", "\n", " # train for the specified number of epochs\n", " for e in range(epochs):\n", " print(f\"Training epoch {e} ...\")\n", - " train(model, trainloader, optim, epochs)\n", + " train(model, trainloader, optim, device)\n", "\n", " # training is completed, then evaluate model on the test set\n", - " loss, accuracy = test(model, testloader)\n", + " loss, accuracy = test(model, testloader, device)\n", " print(f\"{loss = }\")\n", " print(f\"{accuracy = }\")" ] @@ -591,6 +585,7 @@ " self.trainloader = trainloader\n", " self.valloader = valloader\n", " self.model = Net(num_classes=10)\n", + " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "\n", " def fit(self, parameters, config):\n", " \"\"\"This method trains the model using the parameters sent by the\n", @@ -604,7 +599,7 @@ " optim = torch.optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9)\n", "\n", " # do local training (call same function as centralised setting)\n", - " train(self.model, self.trainloader, optim, epochs=1)\n", + " train(self.model, self.trainloader, optim, self.device)\n", "\n", " # return the model parameters to the server as well as extra info (number of training examples in this case)\n", " return get_params(self.model), len(self.trainloader), {}\n", @@ -615,7 +610,7 @@ "\n", " set_params(self.model, parameters)\n", " # do local evaluation (call same function as centralised setting)\n", - " loss, accuracy = test(self.model, self.valloader)\n", + " loss, accuracy = test(self.model, self.valloader, self.device)\n", " # send statistics back to the server\n", " return float(loss), len(self.valloader), {\"accuracy\": accuracy}\n", "\n", @@ -625,7 +620,7 @@ " \"\"\"Replace model parameters with those passed as `parameters`.\"\"\"\n", "\n", " params_dict = zip(model.state_dict().keys(), parameters)\n", 
- " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", + " state_dict = OrderedDict({k: torch.from_numpy(v) for k, v in params_dict})\n", " # now replace the parameters\n", " model.load_state_dict(state_dict, strict=True)\n", "\n", @@ -879,6 +874,8 @@ " \"\"\"Evaluate global model on the whole test set.\"\"\"\n", "\n", " model = Net(num_classes=10)\n", + " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", + " model.to(device)\n", "\n", " # set parameters to the model\n", " params_dict = zip(model.state_dict().keys(), parameters)\n", @@ -886,7 +883,7 @@ " model.load_state_dict(state_dict, strict=True)\n", "\n", " # call test (evaluate model as in centralised setting)\n", - " loss, accuracy = test(model, testloader)\n", + " loss, accuracy = test(model, testloader, device)\n", " return loss, {\"accuracy\": accuracy}\n", "\n", " return evaluate_fn" @@ -974,13 +971,19 @@ "\n", "* **[DOCS]** How about running your Flower clients on the GPU? find out how to do it in the [Flower Simulation Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html)\n", "\n", - "* **[VIDEO]** You can follow our [detailed line-by-line 9-videos tutorial](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB) about everything you need to know to design your own Flower Simulation pipelines\n", + "* Check the quickstart tutorials in https://flower.ai/docs/framework/, for example:\n", + " * [Quickstart PyTorch](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html)\n", + " * [Quickstart TensorFlow](https://flower.ai/docs/framework/tutorial-quickstart-tensorflow.html)\n", + " * [Quickstart JAX](https://flower.ai/docs/framework/tutorial-quickstart-jax.html)\n", "\n", - "* Check more advanced simulation examples the Flower GitHub:\n", + "* Most examples in the Flower Flower GitHub can run in simulation. 
These are some:\n", "\n", - " * Flower simulation with Tensorflow/Keras: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/adap/flower/tree/main/examples/simulation-tensorflow)\n", + " * [Advanced PyTorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)\n", + " * [Finetuning a ViT](https://github.com/adap/flower/tree/main/examples/flowertune-vit)\n", + " * [Quickstart with 🤗 Huggingface](https://github.com/adap/flower/tree/main/examples/quickstart-huggingface)\n", + " * [Quickstart with XGBoost](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)\n", + " * [Quickstart with MLX](https://github.com/adap/flower/tree/main/examples/quickstart-mlx)\n", "\n", - " * Flower simulation with Pytorch: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/adap/flower/tree/main/examples/simulation-pytorch)\n", "\n", "* **[DOCS]** All Flower examples: https://flower.ai/docs/examples/\n", "\n", @@ -1001,7 +1004,8 @@ "toc_visible": true }, "kernelspec": { - "display_name": "Python 3", + "display_name": "flwr-rtwXnbAq-py3.10", + "language": "python", "name": "python3" } }, diff --git a/examples/flower-secure-aggregation/pyproject.toml b/examples/flower-secure-aggregation/pyproject.toml index 89903184f60a..554e1c92c285 100644 --- a/examples/flower-secure-aggregation/pyproject.toml +++ b/examples/flower-secure-aggregation/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Secure Aggregation in Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/opacus/pyproject.toml b/examples/opacus/pyproject.toml index 4814709569ef..d651ce6bc610 100644 --- a/examples/opacus/pyproject.toml +++ b/examples/opacus/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Sample-level Differential Privacy with 
Opacus in Flower" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.1.1", "torchvision==0.16.1", diff --git a/examples/pytorch-federated-variational-autoencoder/pyproject.toml b/examples/pytorch-federated-variational-autoencoder/pyproject.toml index ade08a639f2b..b008b0f07ae8 100644 --- a/examples/pytorch-federated-variational-autoencoder/pyproject.toml +++ b/examples/pytorch-federated-variational-autoencoder/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Variational Autoencoder Example with PyTorch and Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/quickstart-fastai/pyproject.toml b/examples/quickstart-fastai/pyproject.toml index 34b817f84e41..c79292787bf3 100644 --- a/examples/quickstart-fastai/pyproject.toml +++ b/examples/quickstart-fastai/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with Fastai and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "fastai==2.7.14", "torch==2.2.0", diff --git a/examples/quickstart-jax/README.md b/examples/quickstart-jax/README.md index b47f3a82e13b..98f9ec8e7901 100644 --- a/examples/quickstart-jax/README.md +++ b/examples/quickstart-jax/README.md @@ -1,85 +1,67 @@ --- tags: [quickstart, linear regression] dataset: [Synthetic] -framework: [JAX] +framework: [JAX, FLAX] --- -# JAX: From Centralized To Federated +# Federated Learning with JAX and Flower (Quickstart Example) -This example demonstrates how an already existing centralized JAX-based machine learning project can be federated with Flower. +This introductory example to Flower uses JAX, but deep knowledge of JAX is not necessarily required to run the example. 
However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [FLAX](https://flax.readthedocs.io/en/latest/index.html) to define and train a small CNN model. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MINST dataset. -This introductory example for Flower uses JAX, but you're not required to be a JAX expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing JAX project. +## Set up the project -## Project Setup +### Clone the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-jax . && rm -rf flower && cd quickstart-jax +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-jax . \ + && rm -rf _tmp \ + && cd quickstart-jax ``` -This will create a new directory called `quickstart-jax`, containing the following files: +This will create a new directory called `quickstart-jax` with the following structure: ```shell --- pyproject.toml --- requirements.txt --- jax_training.py --- client.py --- server.py --- README.md +quickstart-jax +├── jaxexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies +### Install dependencies and project -Project dependencies (such as `jax` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +Install the dependencies defined in `pyproject.toml` as well as the `jaxexample` package. -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt +```bash +pip install -e . ``` -## Run JAX Federated +## Run the project -This JAX example is based on the [Linear Regression with JAX](https://coax.readthedocs.io/en/latest/examples/linear_regression/jax.html) tutorial and uses a sklearn dataset (generating a random dataset for a regression problem). Feel free to consult the tutorial if you want to get a better understanding of JAX. If you play around with the dataset, please keep in mind that the data samples are generated randomly depending on the settings being done while calling the dataset function. Please checkout out the [scikit-learn tutorial for further information](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html). The file `jax_training.py` contains all the steps that are described in the tutorial. It loads the train and test dataset and a linear regression model, trains the model with the training set, and evaluates the trained model on the test set. 
+You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -The only things we need are a simple Flower server (in `server.py`) and a Flower client (in `client.py`). The Flower client basically takes model and training code tells Flower how to call it. +### Run with the Simulation Engine -Start the server in a terminal as follows: - -```shell -python3 server.py +```bash +flwr run . ``` -Now that the server is running and waiting for clients, we can start two clients that will participate in the federated learning process. To do so simply open two more terminal windows and run the following commands. +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -Start client 1 in the first terminal: - -```shell -python3 client.py +```bash +flwr run . --run-config "num-server-rounds=5 batch-size=32" ``` -Start client 2 in the second terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart JAX tutorial](https://flower.ai/docs/framework/tutorial-quickstart-jax.html) -```shell -python3 client.py -``` +### Run with the Deployment Engine -You are now training a JAX-based linear regression model, federated across two clients. The setup is of course simplified since both clients hold a similar dataset, but you can now continue with your own explorations. How about changing from a linear regression to a more sophisticated model? How about adding more clients? +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/quickstart-jax/client.py b/examples/quickstart-jax/client.py deleted file mode 100644 index 4a2aaf0e5a93..000000000000 --- a/examples/quickstart-jax/client.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Flower client example using JAX for linear regression.""" - -from typing import Callable, Dict, List, Tuple - -import flwr as fl -import jax -import jax.numpy as jnp -import jax_training -import numpy as np - -# Load data and determine model shape -train_x, train_y, test_x, test_y = jax_training.load_data() -grad_fn = jax.grad(jax_training.loss_fn) -model_shape = train_x.shape[1:] - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self): - self.params = jax_training.load_model(model_shape) - - def get_parameters(self, config): - parameters = [] - for _, val in self.params.items(): - parameters.append(np.array(val)) - return parameters - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - for key, value in list(zip(self.params.keys(), parameters)): - self.params[key] = value - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train( - self.params, grad_fn, train_x, train_y - ) - parameters = self.get_parameters(config={}) - return parameters, num_examples, {"loss": float(loss)} - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[float, int, Dict]: - self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation( - self.params, grad_fn, test_x, test_y - ) - return float(loss), num_examples, {"loss": float(loss)} - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() -) diff --git a/examples/quickstart-jax/jax_training.py b/examples/quickstart-jax/jax_training.py deleted file mode 100644 index f57db75d5963..000000000000 --- a/examples/quickstart-jax/jax_training.py +++ /dev/null @@ 
-1,74 +0,0 @@ -"""Linear Regression with JAX. - -This code examples is based on the following code example: -https://coax.readthedocs.io/en/latest/examples/linear_regression/jax.html - -If you have any questions concerning the linear regression used with jax -please read the JAX documentation or the mentioned tutorial. -""" - -from typing import Callable, Dict, List, Tuple - -import jax -import jax.numpy as jnp -import numpy as np -from sklearn.datasets import make_regression -from sklearn.model_selection import train_test_split - -key = jax.random.PRNGKey(0) - - -def load_data() -> ( - Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]] -): - # Load dataset - X, y = make_regression(n_features=3, random_state=0) - X, X_test, y, y_test = train_test_split(X, y) - return X, y, X_test, y_test - - -def load_model(model_shape) -> Dict: - # Extract model parameters - params = {"b": jax.random.uniform(key), "w": jax.random.uniform(key, model_shape)} - return params - - -def loss_fn(params, X, y) -> Callable: - # Return MSE as loss - err = jnp.dot(X, params["w"]) + params["b"] - y - return jnp.mean(jnp.square(err)) - - -def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: - num_examples = X.shape[0] - for epochs in range(50): - grads = grad_fn(params, X, y) - params = jax.tree_map(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params, X, y) - if epochs % 10 == 0: - print(f"For Epoch {epochs} loss {loss}") - return params, loss, num_examples - - -def evaluation(params, grad_fn, X_test, y_test) -> Tuple[float, int]: - num_examples = X_test.shape[0] - err_test = loss_fn(params, X_test, y_test) - loss_test = jnp.mean(jnp.square(err_test)) - return loss_test, num_examples - - -def main(): - X, y, X_test, y_test = load_data() - model_shape = X.shape[1:] - grad_fn = jax.grad(loss_fn) - print("Model Shape", model_shape) - params = load_model(model_shape) - print("Params", params) - params, loss, num_examples = train(params, grad_fn, 
X, y) - print("Training loss:", loss) - loss, num_examples = evaluation(params, grad_fn, X_test, y_test) - print("Evaluation loss:", loss) - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-jax/jaxexample/__init__.py b/examples/quickstart-jax/jaxexample/__init__.py new file mode 100644 index 000000000000..f04ba7eccc81 --- /dev/null +++ b/examples/quickstart-jax/jaxexample/__init__.py @@ -0,0 +1 @@ +"""jaxexample: A Flower / JAX app.""" diff --git a/examples/quickstart-jax/jaxexample/client_app.py b/examples/quickstart-jax/jaxexample/client_app.py new file mode 100644 index 000000000000..915b0d4f16be --- /dev/null +++ b/examples/quickstart-jax/jaxexample/client_app.py @@ -0,0 +1,66 @@ +"""jaxexample: A Flower / JAX app.""" + +import numpy as np +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from jaxexample.task import ( + apply_model, + create_train_state, + get_params, + load_data, + set_params, + train, +) + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + def __init__(self, train_state, trainset, testset): + self.train_state = train_state + self.trainset, self.testset = trainset, testset + + def fit(self, parameters, config): + self.train_state = set_params(self.train_state, parameters) + self.train_state, loss, acc = train(self.train_state, self.trainset) + params = get_params(self.train_state.params) + return ( + params, + len(self.trainset), + {"train_acc": float(acc), "train_loss": float(loss)}, + ) + + def evaluate(self, parameters, config): + self.train_state = set_params(self.train_state, parameters) + + losses = [] + accs = [] + for batch in self.testset: + _, loss, accuracy = apply_model( + self.train_state, batch["image"], batch["label"] + ) + losses.append(float(loss)) + accs.append(float(accuracy)) + + return np.mean(losses), len(self.testset), {"accuracy": np.mean(accs)} + + +def client_fn(context: Context): + + num_partitions = 
context.node_config["num-partitions"] + partition_id = context.node_config["partition-id"] + batch_size = context.run_config["batch-size"] + trainset, testset = load_data(partition_id, num_partitions, batch_size) + + # Create train state object (model + optimizer) + lr = context.run_config["learning-rate"] + train_state = create_train_state(lr) + + # Return Client instance + return FlowerClient(train_state, trainset, testset).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/quickstart-jax/jaxexample/server_app.py b/examples/quickstart-jax/jaxexample/server_app.py new file mode 100644 index 000000000000..1accf9dabd21 --- /dev/null +++ b/examples/quickstart-jax/jaxexample/server_app.py @@ -0,0 +1,47 @@ +"""jaxexample: A Flower / JAX app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from jax import random + +from jaxexample.task import create_model, get_params + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize global model + rng = random.PRNGKey(0) + rng, _ = random.split(rng) + _, model_params = create_model(rng) + params = get_params(model_params) + initial_parameters = ndarrays_to_parameters(params) + + # Define strategy + strategy = FedAvg( + fraction_fit=0.4, + fraction_evaluate=0.5, + evaluate_metrics_aggregation_fn=weighted_average, + 
initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-jax/jaxexample/task.py b/examples/quickstart-jax/jaxexample/task.py new file mode 100644 index 000000000000..3b923dbe6ae8 --- /dev/null +++ b/examples/quickstart-jax/jaxexample/task.py @@ -0,0 +1,152 @@ +"""jaxexample: A Flower / JAX app.""" + +import warnings + +import jax +import jax.numpy as jnp +import numpy as np +import optax +from datasets.utils.logging import disable_progress_bar +from flax import linen as nn +from flax.training.train_state import TrainState +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +disable_progress_bar() + +rng = jax.random.PRNGKey(0) +rng, init_rng = jax.random.split(rng) + +warnings.filterwarnings("ignore", category=UserWarning) +warnings.filterwarnings("ignore", category=RuntimeWarning) + + +class CNN(nn.Module): + """A simple CNN model.""" + + @nn.compact + def __call__(self, x): + x = nn.Conv(features=6, kernel_size=(5, 5))(x) + x = nn.relu(x) + x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) + x = nn.Conv(features=16, kernel_size=(5, 5))(x) + x = nn.relu(x) + x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) + x = x.reshape((x.shape[0], -1)) # flatten + x = nn.Dense(features=120)(x) + x = nn.relu(x) + x = nn.Dense(features=84)(x) + x = nn.relu(x) + x = nn.Dense(features=10)(x) + return x + + +def create_model(rng): + cnn = CNN() + return cnn, cnn.init(rng, jnp.ones([1, 28, 28, 1]))["params"] + + +def create_train_state(learning_rate: float): + """Creates initial `TrainState`.""" + + tx = optax.sgd(learning_rate, momentum=0.9) + model, model_params = create_model(rng) + return TrainState.create(apply_fn=model.apply, params=model_params, tx=tx) + + +def get_params(params): + """Get model parameters as list of numpy 
arrays.""" + return [np.array(param) for param in jax.tree_util.tree_leaves(params)] + + +def set_params(train_state: TrainState, global_params) -> TrainState: + """Create a new trainstate with the global_params.""" + new_params_dict = jax.tree_util.tree_unflatten( + jax.tree_util.tree_structure(train_state.params), global_params + ) + return train_state.replace(params=new_params_dict) + + +@jax.jit +def apply_model(state, images, labels): + """Computes gradients, loss and accuracy for a single batch.""" + + def loss_fn(params): + logits = state.apply_fn({"params": params}, images) + one_hot = jax.nn.one_hot(labels, 10) + loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) + return loss, logits + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, logits), grads = grad_fn(state.params) + accuracy = jnp.mean(jnp.argmax(logits, -1) == labels) + return grads, loss, accuracy + + +@jax.jit +def update_model(state, grads): + return state.apply_gradients(grads=grads) + + +def train(state, train_ds): + """Train for a single epoch.""" + + epoch_loss = [] + epoch_accuracy = [] + + for batch in train_ds: + batch_images = batch["image"] + batch_labels = batch["label"] + grads, loss, accuracy = apply_model(state, batch_images, batch_labels) + state = update_model(state, grads) + epoch_loss.append(loss) + epoch_accuracy.append(accuracy) + train_loss = np.mean(epoch_loss) + train_accuracy = np.mean(epoch_accuracy) + return state, train_loss, train_accuracy + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int, batch_size: int): + """Load partition MNIST data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + + # Divide data on each node: 80% train, 20% test + partition = 
partition.train_test_split(test_size=0.2) + + partition["train"].set_format("jax") + partition["test"].set_format("jax") + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [ + jnp.expand_dims(jnp.float32(img), 3) / 255 for img in batch["image"] + ] + batch["label"] = [jnp.int16(label) for label in batch["label"]] + return batch + + train_partition = ( + partition["train"] + .batch(batch_size, num_proc=2, drop_last_batch=True) + .with_transform(apply_transforms) + ) + test_partition = ( + partition["test"] + .batch(batch_size, num_proc=2, drop_last_batch=True) + .with_transform(apply_transforms) + ) + + train_partition.shuffle(seed=1234) + test_partition.shuffle(seed=1234) + + return train_partition, test_partition diff --git a/examples/quickstart-jax/pyproject.toml b/examples/quickstart-jax/pyproject.toml index 68a3455aedee..8e677006a691 100644 --- a/examples/quickstart-jax/pyproject.toml +++ b/examples/quickstart-jax/pyproject.toml @@ -1,16 +1,38 @@ -[tool.poetry] -name = "jax_example" -version = "0.1.0" -description = "JAX example training a linear regression model with federated learning" -authors = ["The Flower Authors "] +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry.dependencies] -python = ">=3.9,<3.11" -flwr = "1.0.0" -jax = "0.4.17" -jaxlib = "0.4.17" -scikit-learn = "1.1.1" +[project] +name = "jaxexample" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.13.1", + "flwr-datasets[vision]>=0.4.0", + "datasets>=2.21.0", + "jax==0.4.31", + "jaxlib==0.4.31", + "flax==0.9.0", +] -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "jaxexample.server_app:app" +clientapp = "jaxexample.client_app:app" + +[tool.flwr.app.config] 
+num-server-rounds = 5 +learning-rate = 0.1 +batch-size = 64 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 50 diff --git a/examples/quickstart-jax/requirements.txt b/examples/quickstart-jax/requirements.txt deleted file mode 100644 index 964f07a51b7d..000000000000 --- a/examples/quickstart-jax/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0,<2.0 -jax==0.4.17 -jaxlib==0.4.17 -scikit-learn==1.1.1 diff --git a/examples/quickstart-jax/run.sh b/examples/quickstart-jax/run.sh deleted file mode 100755 index c64f362086aa..000000000000 --- a/examples/quickstart-jax/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-jax/server.py b/examples/quickstart-jax/server.py deleted file mode 100644 index 2bc3716d84ae..000000000000 --- a/examples/quickstart-jax/server.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - ) diff --git a/examples/quickstart-mlx/pyproject.toml b/examples/quickstart-mlx/pyproject.toml index 3165a3d93881..1c8d2b0b811c 100644 --- a/examples/quickstart-mlx/pyproject.toml +++ b/examples/quickstart-mlx/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with MLX and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "mlx==0.16.0", "numpy==1.26.4", diff --git a/examples/quickstart-pandas/pyproject.toml 
b/examples/quickstart-pandas/pyproject.toml index a80311292acb..986ae9abd0ac 100644 --- a/examples/quickstart-pandas/pyproject.toml +++ b/examples/quickstart-pandas/pyproject.toml @@ -12,7 +12,7 @@ authors = [ { name = "Ragy Haddad", email = "ragy202@gmail.com" }, ] dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "numpy==1.24.4", "pandas==2.0.0", diff --git a/examples/quickstart-pytorch-lightning/pyproject.toml b/examples/quickstart-pytorch-lightning/pyproject.toml index e305d1ca75e8..86f90e945236 100644 --- a/examples/quickstart-pytorch-lightning/pyproject.toml +++ b/examples/quickstart-pytorch-lightning/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch Lightning and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "pytorch-lightning<2.0.0; sys_platform == 'darwin'", "pytorch-lightning==1.6.0; sys_platform != 'darwin'", diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index d07f83a7ea85..2dc26b054c9e 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -48,7 +48,11 @@ You can run your Flower project in both _simulation_ and _deployment_ mode witho ### Run with the Simulation Engine +> \[!TIP\] +> This example might run faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. + ```bash +# Run with the default federation (CPU only) flwr run . ``` @@ -58,6 +62,13 @@ You can also override some of the settings for your `ClientApp` and `ServerApp` flwr run . 
--run-config "num-server-rounds=5 learning-rate=0.05" ``` +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 5x`ClientApp` will run in parallel in the available GPU. You can tweak the degree of parallelism by adjusting the settings of this federation in the `pyproject.toml`. + +```bash +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu +``` + > \[!TIP\] > For a more detailed walk-through check our [quickstart PyTorch tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html) diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml index fa086d18880d..0e72bce6756b 100644 --- a/examples/quickstart-pytorch/pyproject.toml +++ b/examples/quickstart-pytorch/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", @@ -36,3 +36,8 @@ default = "local-simulation" [tool.flwr.federations.local-simulation] options.num-supernodes = 10 + +[tool.flwr.federations.local-simulation-gpu] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.2 # at most 5 ClientApp will run in a given GPU diff --git a/examples/quickstart-pytorch/pytorchexample/task.py b/examples/quickstart-pytorch/pytorchexample/task.py index 8e0808871616..d115c9f1a469 100644 --- a/examples/quickstart-pytorch/pytorchexample/task.py +++ b/examples/quickstart-pytorch/pytorchexample/task.py @@ -100,6 +100,7 @@ def train(net, trainloader, valloader, epochs, learning_rate, device): def test(net, testloader, device): """Validate the model on the test set.""" + net.to(device) # move model to GPU if available 
criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): diff --git a/examples/quickstart-sklearn-tabular/pyproject.toml b/examples/quickstart-sklearn-tabular/pyproject.toml index 4fc34ed58bb6..7c7554920973 100644 --- a/examples/quickstart-sklearn-tabular/pyproject.toml +++ b/examples/quickstart-sklearn-tabular/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with scikit-learn and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.3.0", ] diff --git a/examples/quickstart-tensorflow/pyproject.toml b/examples/quickstart-tensorflow/pyproject.toml index f5fc566d654c..3194640fa654 100644 --- a/examples/quickstart-tensorflow/pyproject.toml +++ b/examples/quickstart-tensorflow/pyproject.toml @@ -8,9 +8,9 @@ version = "1.0.0" description = "Federated Learning with Tensorflow/Keras and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", - "tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"", + "tensorflow>=2.9.1, != 2.11.1 ; (platform_machine == \"x86_64\" or platform_machine == \"aarch64\")", "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", ] [tool.hatch.build.targets.wheel] @@ -36,3 +36,7 @@ default = "local-simulation" [tool.flwr.federations.local-simulation] options.num-supernodes = 10 + +[tool.flwr.federations.local-deployment] +address = "127.0.0.1:9093" +root-certificates = "./.cache/certificates/ca.crt" diff --git a/examples/sklearn-logreg-mnist/pyproject.toml b/examples/sklearn-logreg-mnist/pyproject.toml index 75dae57a0a40..797ae8045e0b 100644 --- a/examples/sklearn-logreg-mnist/pyproject.toml +++ b/examples/sklearn-logreg-mnist/pyproject.toml @@ -12,9 +12,9 @@ authors = [ { name = "Kaushik Amar Das", 
email = "kaushik.das@iiitg.ac.in" }, ] dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", - "numpy<2.0.0", + "numpy>=2.0.0", "scikit-learn~=1.2.2", ] diff --git a/examples/tensorflow-privacy/pyproject.toml b/examples/tensorflow-privacy/pyproject.toml index b404f7f183a0..b9de0b692408 100644 --- a/examples/tensorflow-privacy/pyproject.toml +++ b/examples/tensorflow-privacy/pyproject.toml @@ -7,7 +7,7 @@ name = "tensorflow-privacy-fl" version = "1.0.0" description = "Sample-level Differential Privacy with Tensorflow-Privacy in Flower" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "tensorflow-estimator~=2.4", "tensorflow-probability~=0.22.0", diff --git a/examples/vertical-fl/pyproject.toml b/examples/vertical-fl/pyproject.toml index 458878748cde..2376b55e1110 100644 --- a/examples/vertical-fl/pyproject.toml +++ b/examples/vertical-fl/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "PyTorch Vertical FL with Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets>=0.3.0", "numpy==1.24.4", "pandas==2.0.3", diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index 3906f8bf3301..906b035cd6a2 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with XGBoost and Flower (Comprehensive Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets>=0.3.0", "xgboost>=2.0.0", ] diff --git a/glossary/flower-datasets.mdx b/glossary/flower-datasets.mdx index 24537dfe223b..8bc458af2aa0 100644 --- a/glossary/flower-datasets.mdx +++ b/glossary/flower-datasets.mdx @@ -13,7 +13,7 @@ related: link: "https://github.com/adap/flower/tree/main/datasets" 
--- -Flower Datasets is a library that enables the creation of datasets for federated learning/analytics/evaluation by partitioning centralized datasets to exhibit heterogeneity or using naturally partitioned datasets. It was created by the Flower Labs team, which also created Flower - a Friendly Federated Learning Framework. +Flower Datasets is a library that enables the creation of datasets for federated learning/analytics/evaluation by partitioning centralized datasets to exhibit heterogeneity or using naturally partitioned datasets. It was created by the Flower Labs team, which also created Flower - A Friendly Federated AI Framework. The key features include: * downloading datasets (HuggingFace `datasets` are used under the hood), diff --git a/glossary/heterogeneity-in-federated-learning.mdx b/glossary/heterogeneity-in-federated-learning.mdx new file mode 100644 index 000000000000..2c95cea02687 --- /dev/null +++ b/glossary/heterogeneity-in-federated-learning.mdx @@ -0,0 +1,38 @@ +--- +title: "Heterogeneity in Federated Learning" +description: "Heterogeneity is a core challenge in FL, and countering the problems that result from it is an active field of study. We distinguish statistical and structural heterogeneity." +date: "2024-05-24" +author: + name: "Adam Narożniak" + position: "ML Engineer at Flower Labs" + website: "https://discuss.flower.ai/u/adam.narozniak/summary" +--- + +Heterogeneity is a core challenge in federated learning (FL), and countering the problems that result from it is an active field of study in FL. We can distinguish the following categories: +* statistical heterogeneity (related to data), +* structural heterogeneity (related to resources and infrastructure). + +Real-world FL training can exhibit any combination of the problems described below. 
+ +### Statistical Heterogeneity +Statistical heterogeneity is the situation in which the clients' distributions are not equal, which can be a result of the following: +* feature distribution skew (covariate shift), +* label distribution skew (prior probability shift), +* same label, different features (concept drift), +* same features, different label (concept shift), +* quantity skew. + +### Structural Heterogeneity +Structural Heterogeneity results from different types of devices that can be in the same federation of FL devices, which can exhibit differences in the following: +* computation resource (different chips), which leads to different training times, +* storage resources (available disk space), which can imply, e.g., not enough resources to store the results (which can also indicate a different number of samples), +* energy levels/charging status, current resource consumption, which can change over time and can imply the lack of willingness/capabilities to join the training, +* network connection, e.g., unstable network connection, can lead to more frequent dropouts and lack of availability. + +### Simulating Heterogeneity - Flower Datasets +Flower Datasets is a library that enables you to simulate statistical heterogeneity according to the various partitioning schemes (see all [here](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html)). +It provides ways of simulating quantity skew, label distribution skew, and mix of them, depending on the object used. It also enables working with datasets that naturally exhibit different types of heterogeneity. + +### Countering Heterogeneity - Strategies in Flower +Flower is a library that enables you to perform federated learning in deployment (real-life scenario) and simulation. 
It provides out-of-the-box weight aggregation +strategies (see them [here](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.html)), which are used as the core measures to mitigate problems in heterogeneous environments. diff --git a/pyproject.toml b/pyproject.toml index 4b32908c8f51..f1207a94c448 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,8 +4,8 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.13.0" -description = "Flower: A Friendly Federated Learning Framework" +version = "1.14.0" +description = "Flower: A Friendly Federated AI Framework" license = "Apache-2.0" authors = ["The Flower Authors "] readme = "README.md" @@ -13,12 +13,13 @@ homepage = "https://flower.ai" repository = "https://github.com/adap/flower" documentation = "https://flower.ai" keywords = [ - "flower", - "fl", - "federated learning", - "federated analytics", - "federated evaluation", - "machine learning", + "Artificial Intelligence", + "Federated AI", + "Federated Analytics", + "Federated Evaluation", + "Federated Learning", + "Flower", + "Machine Learning", ] classifiers = [ "Development Status :: 5 - Production/Stable", @@ -34,6 +35,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", @@ -50,21 +52,23 @@ exclude = ["src/py/**/*_test.py"] # `flwr` CLI flwr = "flwr.cli.app:app" # SuperExec (can run with either Deployment Engine or Simulation Engine) -flower-superexec = "flwr.superexec.app:run_superexec" +flower-superexec = "flwr.superexec.app:run_superexec" # Deprecated # Simulation Engine +flwr-simulation = "flwr.simulation.app:flwr_simulation" flower-simulation = "flwr.simulation.run_simulation:run_simulation_from_cli" # Deployment Engine 
flower-superlink = "flwr.server.app:run_superlink" flower-supernode = "flwr.client.supernode.app:run_supernode" -flower-server-app = "flwr.server.run_serverapp:run_server_app" +flwr-serverapp = "flwr.server.serverapp:flwr_serverapp" +flower-server-app = "flwr.server.run_serverapp:run_server_app" # Deprecated flwr-clientapp = "flwr.client.clientapp:flwr_clientapp" flower-client-app = "flwr.client.supernode:run_client_app" # Deprecated [tool.poetry.dependencies] python = "^3.9" # Mandatory dependencies -numpy = "^1.21.0" -grpcio = "^1.60.0,!=1.64.2,!=1.65.1,!=1.65.2,!=1.65.4,!=1.65.5,!=1.66.0,!=1.66.1" +numpy = ">=1.26.0,<3.0.0" +grpcio = "^1.60.0,!=1.64.2,<=1.64.3" protobuf = "^4.25.2" cryptography = "^42.0.4" pycryptodome = "^3.18.0" @@ -73,6 +77,7 @@ typer = "^0.12.5" tomli = "^2.0.1" tomli-w = "^1.0.0" pathspec = "^0.12.1" +rich = "^13.5.0" # Optional dependencies (Simulation Engine) ray = { version = "==2.10.0", optional = true, python = ">=3.9,<3.12" } # Optional dependencies (REST transport layer) @@ -121,6 +126,7 @@ nbstripout = "==0.6.1" ruff = "==0.1.9" sphinx-argparse = "==0.4.0" pipreqs = "==0.4.13" +mdformat = "==0.7.18" mdformat-gfm = "==0.3.6" mdformat-frontmatter = "==2.0.1" mdformat-beautysh = "==0.1.1" @@ -133,7 +139,7 @@ licensecheck = "==2024" pre-commit = "==3.5.0" sphinx-substitution-extensions = "2022.02.16" sphinxext-opengraph = "==0.9.1" -docstrfmt = { git = "https://github.com/charlesbvll/docstrfmt.git", branch = "patch-1" } +docstrfmt = { git = "https://github.com/charlesbvll/docstrfmt.git", branch = "patch-2" } docsig = "==0.64.0" [tool.docstrfmt] diff --git a/src/docker/base/README.md b/src/docker/base/README.md index ef290a26fec4..9cf31ec0d048 100644 --- a/src/docker/base/README.md +++ b/src/docker/base/README.md @@ -21,8 +21,16 @@ - `unstable` - points to the last successful build of the `main` branch -- `nightly`, `.dev` e.g. `1.13.0.dev20241014` +- `nightly`, `.dev` e.g. 
`1.14.0.dev20241128` - uses Python 3.11 and Ubuntu 24.04 +- `1.13.1-py3.11-alpine3.19` +- `1.13.1-py3.11-ubuntu24.04` +- `1.13.1-py3.10-ubuntu24.04` +- `1.13.1-py3.9-ubuntu24.04` +- `1.13.0-py3.11-alpine3.19` +- `1.13.0-py3.11-ubuntu24.04` +- `1.13.0-py3.10-ubuntu24.04` +- `1.13.0-py3.9-ubuntu24.04` - `1.12.0-py3.11-alpine3.19` - `1.12.0-py3.11-ubuntu24.04` - `1.12.0-py3.10-ubuntu24.04` diff --git a/src/docker/base/ubuntu-cuda/Dockerfile b/src/docker/base/ubuntu-cuda/Dockerfile new file mode 100644 index 000000000000..3ffb6401805a --- /dev/null +++ b/src/docker/base/ubuntu-cuda/Dockerfile @@ -0,0 +1,116 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# hadolint global ignore=DL3008 +ARG CUDA_VERSION=12.4.1 +ARG DISTRO=ubuntu +ARG DISTRO_VERSION=24.04 +FROM nvidia/cuda:${CUDA_VERSION}-base-${DISTRO}${DISTRO_VERSION} AS python + +ENV DEBIAN_FRONTEND=noninteractive + +# Install system dependencies +RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + clang-format git unzip ca-certificates openssh-client liblzma-dev \ + build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev wget\ + libsqlite3-dev curl llvm libncursesw5-dev xz-utils tk-dev libxml2-dev \ + libxmlsec1-dev libffi-dev liblzma-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install PyEnv and Python +ARG PYTHON_VERSION=3.11 +ENV PYENV_ROOT=/root/.pyenv +ENV PATH=$PYENV_ROOT/bin:$PATH +# https://github.com/hadolint/hadolint/wiki/DL4006 +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +RUN curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash + +# hadolint ignore=DL3003 +RUN git clone https://github.com/pyenv/pyenv.git \ + && cd pyenv/plugins/python-build || exit \ + && ./install.sh + +# Issue: python-build only accepts the exact Python version e.g. 3.11.1 but +# we want to allow more general versions like 3.11 +# Solution: first use pyenv to get the exact version and then pass it to python-build +RUN LATEST=$(pyenv latest -k ${PYTHON_VERSION}) \ + && python-build "${LATEST}" /usr/local/bin/python + +ENV PATH=/usr/local/bin/python/bin:$PATH + +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. 
+RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} \ + # Use a virtual environment to ensure that Python packages are installed in the same location + # regardless of whether the subsequent image build is run with the app or the root user + && python -m venv /python/venv +ENV PATH=/python/venv/bin:$PATH + +RUN pip install -U --no-cache-dir \ + pip==${PIP_VERSION} \ + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi + +FROM nvidia/cuda:${CUDA_VERSION}-base-${DISTRO}${DISTRO_VERSION} AS base + +COPY --from=python /usr/local/bin/python /usr/local/bin/python + +ENV DEBIAN_FRONTEND=noninteractive \ + PATH=/usr/local/bin/python/bin:$PATH + +RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + libsqlite3-0 \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* \ + # add non-root user + && useradd \ + --no-create-home \ + --home-dir /app \ + -c "" \ + --uid 49999 app \ + && mkdir -p /app \ + && chown -R app:app /app + +COPY --from=python --chown=app:app /python/venv /python/venv + +ENV PATH=/python/venv/bin:$PATH \ + # Send stdout and stderr stream directly to the terminal. Ensures that no + # output is retained in a buffer if the application crashes. + PYTHONUNBUFFERED=1 \ + # Typically, bytecode is created on the first invocation to speed up following invocation. + # However, in Docker we only make a single invocation (when we start the container). + # Therefore, we can disable bytecode writing. + PYTHONDONTWRITEBYTECODE=1 \ + # Ensure that python encoding is always UTF-8. 
+ PYTHONIOENCODING=UTF-8 \ + LANG=C.UTF-8 \ + LC_ALL=C.UTF-8 + +WORKDIR /app +USER app +ENV HOME=/app diff --git a/src/docker/clientapp/README.md b/src/docker/clientapp/README.md index a610de66eeae..e41abeb6dde8 100644 --- a/src/docker/clientapp/README.md +++ b/src/docker/clientapp/README.md @@ -21,8 +21,14 @@ - `unstable` - points to the last successful build of the `main` branch -- `nightly`, `.dev` e.g. `1.13.0.dev20241014` +- `nightly`, `.dev` e.g. `1.14.0.dev20241128` - uses Python 3.11 and Ubuntu 24.04 +- `1.13.1`, `1.13.1-py3.11-ubuntu24.04` +- `1.13.1-py3.10-ubuntu24.04` +- `1.13.1-py3.9-ubuntu24.04` +- `1.13.0`, `1.13.0-py3.11-ubuntu24.04` +- `1.13.0-py3.10-ubuntu24.04` +- `1.13.0-py3.9-ubuntu24.04` - `1.12.0`, `1.12.0-py3.11-ubuntu24.04` - `1.12.0-py3.10-ubuntu24.04` - `1.12.0-py3.9-ubuntu24.04` diff --git a/src/docker/complete/.gitignore b/src/docker/complete/.gitignore index 53a6b57b9b4d..7f28fd7b182c 100644 --- a/src/docker/complete/.gitignore +++ b/src/docker/complete/.gitignore @@ -1,3 +1,2 @@ -superexec-certificates superlink-certificates state diff --git a/src/docker/complete/certs.yml b/src/docker/complete/certs.yml index d7d938a2aa4a..863c702fd9f6 100644 --- a/src/docker/complete/certs.yml +++ b/src/docker/complete/certs.yml @@ -36,30 +36,6 @@ services: IP.2 = $${SUPERLINK_IP} EOF - ARG SUPEREXEC_IP=127.0.0.1 - - COPY <<-EOF superexec-certificate.conf - [req] - default_bits = 4096 - prompt = no - default_md = sha256 - req_extensions = req_ext - distinguished_name = dn - - [dn] - C = US - O = Flower - CN = localhost - - [req_ext] - subjectAltName = @alt_names - - [alt_names] - DNS.0 = superexec - IP.1 = ::1 - IP.2 = $${SUPEREXEC_IP} - EOF - COPY --chmod=744 <<-'EOF' generate.sh #!/bin/bash # This script will generate all certificates if ca.crt does not exist @@ -113,7 +89,6 @@ services: -extensions req_ext } generate superlink-certificates superlink-certificate.conf - generate superexec-certificates superexec-certificate.conf EOF WORKDIR /app @@ 
-121,4 +96,3 @@ services: ENTRYPOINT ["./script/generate.sh"] volumes: - ./superlink-certificates/:/app/superlink-certificates/:rw - - ./superexec-certificates/:/app/superexec-certificates/:rw diff --git a/src/docker/complete/compose.yml b/src/docker/complete/compose.yml index b21189d94123..5287374cf040 100644 --- a/src/docker/complete/compose.yml +++ b/src/docker/complete/compose.yml @@ -1,16 +1,20 @@ services: # create a SuperLink service superlink: - image: flwr/superlink:${FLWR_VERSION:-1.12.0} + image: flwr/superlink:${FLWR_VERSION:-1.14.0} command: - --insecure + - --isolation + - process + ports: + - 9093:9093 - # create a SuperExec service - superexec: + # create a ServerApp service + serverapp: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/superexec:${FLWR_VERSION:-1.12.0} + FROM flwr/serverapp:${FLWR_VERSION:-1.14.0} # gcc is required for the fastai quickstart example USER root @@ -25,26 +29,23 @@ services: RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . 
- ENTRYPOINT ["flower-superexec"] - ports: - - 9093:9093 + ENTRYPOINT ["flwr-serverapp"] command: - - --executor - - flwr.superexec.deployment:executor - --insecure - - --executor-config - - superlink="superlink:9091" + - --serverappio-api-address + - superlink:9091 + restart: on-failure:3 depends_on: - superlink # create a two SuperNode service with different node configs supernode-1: - image: flwr/supernode:${FLWR_VERSION:-1.12.0} + image: flwr/supernode:${FLWR_VERSION:-1.14.0} command: - --insecure - --superlink - superlink:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9094 - --isolation - process @@ -54,12 +55,12 @@ services: - superlink supernode-2: - image: flwr/supernode:${FLWR_VERSION:-1.12.0} + image: flwr/supernode:${FLWR_VERSION:-1.14.0} command: - --insecure - --superlink - superlink:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9095 - --isolation - process @@ -71,12 +72,12 @@ services: # uncomment to add another SuperNode # # supernode-3: - # image: flwr/supernode:${FLWR_VERSION:-1.12.0} + # image: flwr/supernode:${FLWR_VERSION:-1.14.0} # command: # - --insecure # - --superlink # - superlink:9092 - # - --supernode-address + # - --clientappio-api-address # - 0.0.0.0:9096 # - --isolation # - process @@ -89,7 +90,7 @@ services: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.14.0} # gcc is required for the fastai quickstart example USER root @@ -106,7 +107,8 @@ services: ENTRYPOINT ["flwr-clientapp"] command: - - --supernode + - --insecure + - --clientappio-api-address - supernode-1:9094 deploy: resources: @@ -120,7 +122,7 @@ services: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.14.0} # gcc is required for the fastai quickstart example USER root @@ -137,7 +139,8 @@ services: ENTRYPOINT ["flwr-clientapp"] command: - - 
--supernode + - --insecure + - --clientappio-api-address - supernode-2:9095 deploy: resources: @@ -153,7 +156,7 @@ services: # build: # context: ${PROJECT_DIR:-.} # dockerfile_inline: | - # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + # FROM flwr/clientapp:${FLWR_VERSION:-1.14.0} # # gcc is required for the fastai quickstart example # USER root @@ -170,7 +173,8 @@ services: # ENTRYPOINT ["flwr-clientapp"] # command: - # - --supernode + # - --insecure + # - --clientappio-api-address # - supernode-3:9096 # deploy: # resources: diff --git a/src/docker/complete/with-state.yml b/src/docker/complete/with-state.yml index cc922a9ef12e..2105ffbdf490 100644 --- a/src/docker/complete/with-state.yml +++ b/src/docker/complete/with-state.yml @@ -2,13 +2,17 @@ services: superlink: command: - --insecure + - --isolation + - process - --database=state/state.db # To toggle TLS encryption and persisting state for the SuperLink, comment the key `command` # above and uncomment the lines below: # command: - # - --ssl-ca-certfile=certificates/ca.crt - # - --ssl-certfile=certificates/server.pem - # - --ssl-keyfile=certificates/server.key - # - --database=state/state.db + # - --isolation + # - process + # - --ssl-ca-certfile=certificates/ca.crt + # - --ssl-certfile=certificates/server.pem + # - --ssl-keyfile=certificates/server.key + # - --database=state/state.db volumes: - ./state/:/app/state/:rw diff --git a/src/docker/complete/with-tls.yml b/src/docker/complete/with-tls.yml index 6cbeb2ba7397..e005db0182f6 100644 --- a/src/docker/complete/with-tls.yml +++ b/src/docker/complete/with-tls.yml @@ -1,6 +1,8 @@ services: superlink: command: + - --isolation + - process - --ssl-ca-certfile=certificates/ca.crt - --ssl-certfile=certificates/server.pem - --ssl-keyfile=certificates/server.key @@ -12,56 +14,37 @@ services: - source: superlink-keyfile target: /app/certificates/server.key - superexec: - command: - - --executor - - flwr.superexec.deployment:executor - - --executor-config - - 
superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" - - --ssl-ca-certfile=certificates/ca.crt - - --ssl-certfile=certificates/server.pem - - --ssl-keyfile=certificates/server.key - secrets: - - source: superlink-ca-certfile - target: /app/certificates/superlink-ca.crt - - source: superexec-ca-certfile - target: /app/certificates/ca.crt - - source: superexec-certfile - target: /app/certificates/server.pem - - source: superexec-keyfile - target: /app/certificates/server.key - supernode-1: command: - --superlink - superlink:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9094 - --isolation - process - --node-config - "partition-id=0 num-partitions=2" - --root-certificates - - certificates/ca.crt + - certificates/superlink-ca.crt secrets: - source: superlink-ca-certfile - target: /app/certificates/ca.crt + target: /app/certificates/superlink-ca.crt supernode-2: command: - --superlink - superlink:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9095 - --isolation - process - --node-config - "partition-id=1 num-partitions=2" - --root-certificates - - certificates/ca.crt + - certificates/superlink-ca.crt secrets: - source: superlink-ca-certfile - target: /app/certificates/ca.crt + target: /app/certificates/superlink-ca.crt # uncomment to enable TLS on another SuperNode # @@ -69,17 +52,17 @@ services: # command: # - --superlink # - superlink:9092 - # - --supernode-address + # - --clientappio-api-address # - 0.0.0.0:9096 # - --isolation # - process # - --node-config # - "partition-id=1 num-partitions=2" # - --root-certificates - # - certificates/ca.crt + # - certificates/superlink-ca.crt # secrets: # - source: superlink-ca-certfile - # target: /app/certificates/ca.crt + # target: /app/certificates/superlink-ca.crt secrets: superlink-ca-certfile: @@ -88,9 +71,3 @@ secrets: file: ./superlink-certificates/server.pem superlink-keyfile: file: ./superlink-certificates/server.key - superexec-ca-certfile: - file: 
./superexec-certificates/ca.crt - superexec-certfile: - file: ./superexec-certificates/server.pem - superexec-keyfile: - file: ./superexec-certificates/server.key diff --git a/src/docker/distributed/.gitignore b/src/docker/distributed/.gitignore index 1a11330c6e95..9c249f378b96 100644 --- a/src/docker/distributed/.gitignore +++ b/src/docker/distributed/.gitignore @@ -1,3 +1,2 @@ -superexec-certificates superlink-certificates server/state diff --git a/src/docker/distributed/certs.yml b/src/docker/distributed/certs.yml index 48e157582e40..0c8a30096d7a 100644 --- a/src/docker/distributed/certs.yml +++ b/src/docker/distributed/certs.yml @@ -3,4 +3,3 @@ services: build: args: SUPERLINK_IP: ${SUPERLINK_IP:-127.0.0.1} - SUPEREXEC_IP: ${SUPEREXEC_IP:-127.0.0.1} diff --git a/src/docker/distributed/client/compose.yml b/src/docker/distributed/client/compose.yml index 6bc6e6739ae4..5f1dc1b1a4d8 100644 --- a/src/docker/distributed/client/compose.yml +++ b/src/docker/distributed/client/compose.yml @@ -1,62 +1,62 @@ services: supernode-1: - image: flwr/supernode:${FLWR_VERSION:-1.12.0} + image: flwr/supernode:${FLWR_VERSION:-1.14.0} command: - --superlink - ${SUPERLINK_IP:-127.0.0.1}:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9094 - --isolation - process - --node-config - "partition-id=0 num-partitions=2" - --root-certificates - - certificates/ca.crt + - certificates/superlink-ca.crt secrets: - source: superlink-ca-certfile - target: /app/certificates/ca.crt + target: /app/certificates/superlink-ca.crt supernode-2: - image: flwr/supernode:${FLWR_VERSION:-1.12.0} + image: flwr/supernode:${FLWR_VERSION:-1.14.0} command: - --superlink - ${SUPERLINK_IP:-127.0.0.1}:9092 - - --supernode-address + - --clientappio-api-address - 0.0.0.0:9095 - --isolation - process - --node-config - "partition-id=1 num-partitions=2" - --root-certificates - - certificates/ca.crt + - certificates/superlink-ca.crt secrets: - source: superlink-ca-certfile - target: 
/app/certificates/ca.crt + target: /app/certificates/superlink-ca.crt # uncomment to add another SuperNode # # supernode-3: - # image: flwr/supernode:${FLWR_VERSION:-1.12.0} + # image: flwr/supernode:${FLWR_VERSION:-1.14.0} # command: # - --superlink # - ${SUPERLINK_IP:-127.0.0.1}:9092 - # - --supernode-address + # - --clientappio-api-address # - 0.0.0.0:9096 # - --isolation # - process # - --node-config # - "partition-id=1 num-partitions=2" # - --root-certificates - # - certificates/ca.crt + # - certificates/superlink-ca.crt # secrets: # - source: superlink-ca-certfile - # target: /app/certificates/ca.crt + # target: /app/certificates/superlink-ca.crt clientapp-1: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.14.0} WORKDIR /app COPY --chown=app:app pyproject.toml . @@ -65,7 +65,8 @@ services: ENTRYPOINT ["flwr-clientapp"] command: - - --supernode + - --insecure + - --clientappio-api-address - supernode-1:9094 deploy: resources: @@ -79,7 +80,7 @@ services: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.14.0} WORKDIR /app COPY --chown=app:app pyproject.toml . @@ -88,7 +89,8 @@ services: ENTRYPOINT ["flwr-clientapp"] command: - - --supernode + - --insecure + - --clientappio-api-address - supernode-2:9095 deploy: resources: @@ -104,7 +106,7 @@ services: # build: # context: ${PROJECT_DIR:-.} # dockerfile_inline: | - # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + # FROM flwr/clientapp:${FLWR_VERSION:-1.14.0} # WORKDIR /app # COPY --chown=app:app pyproject.toml . 
@@ -113,7 +115,8 @@ services: # ENTRYPOINT ["flwr-clientapp"] # command: - # - --supernode + # - --insecure + # - --clientappio-api-address # - supernode-3:9096 # deploy: # resources: diff --git a/src/docker/distributed/server/compose.yml b/src/docker/distributed/server/compose.yml index f53b63593eb8..0f80cc97ee2f 100644 --- a/src/docker/distributed/server/compose.yml +++ b/src/docker/distributed/server/compose.yml @@ -1,7 +1,9 @@ services: superlink: - image: flwr/superlink:${FLWR_VERSION:-1.12.0} + image: flwr/superlink:${FLWR_VERSION:-1.14.0} command: + - --isolation + - process - --ssl-ca-certfile=certificates/ca.crt - --ssl-certfile=certificates/server.pem - --ssl-keyfile=certificates/server.key @@ -17,38 +19,24 @@ services: target: /app/certificates/server.key ports: - 9092:9092 + - 9093:9093 - superexec: + serverapp: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/superexec:${FLWR_VERSION:-1.12.0} + FROM flwr/serverapp:${FLWR_VERSION:-1.14.0} WORKDIR /app COPY --chown=app:app pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . 
- ENTRYPOINT ["flower-superexec"] + ENTRYPOINT ["flwr-serverapp"] command: - - --executor - - flwr.superexec.deployment:executor - - --executor-config - - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" - - --ssl-ca-certfile=certificates/ca.crt - - --ssl-certfile=certificates/server.pem - - --ssl-keyfile=certificates/server.key - secrets: - - source: superlink-ca-certfile - target: /app/certificates/superlink-ca.crt - - source: superexec-ca-certfile - target: /app/certificates/ca.crt - - source: superexec-certfile - target: /app/certificates/server.pem - - source: superexec-keyfile - target: /app/certificates/server.key - ports: - - 9093:9093 + - --insecure + - --serverappio-api-address + - superlink:9091 depends_on: - superlink @@ -59,9 +47,3 @@ secrets: file: ../superlink-certificates/server.pem superlink-keyfile: file: ../superlink-certificates/server.key - superexec-ca-certfile: - file: ../superexec-certificates/ca.crt - superexec-certfile: - file: ../superexec-certificates/server.pem - superexec-keyfile: - file: ../superexec-certificates/server.key diff --git a/src/docker/serverapp/Dockerfile b/src/docker/serverapp/Dockerfile index 08eceacc4557..f26034b6c156 100644 --- a/src/docker/serverapp/Dockerfile +++ b/src/docker/serverapp/Dockerfile @@ -17,4 +17,4 @@ ARG BASE_REPOSITORY=flwr/base ARG BASE_IMAGE FROM $BASE_REPOSITORY:$BASE_IMAGE -ENTRYPOINT ["flower-server-app"] +ENTRYPOINT ["flwr-serverapp"] diff --git a/src/docker/serverapp/README.md b/src/docker/serverapp/README.md index 110712fe3bfd..e2dad5ee8c56 100644 --- a/src/docker/serverapp/README.md +++ b/src/docker/serverapp/README.md @@ -21,8 +21,14 @@ - `unstable` - points to the last successful build of the `main` branch -- `nightly`, `.dev` e.g. `1.13.0.dev20241014` +- `nightly`, `.dev` e.g. 
`1.14.0.dev20241128` - uses Python 3.11 and Ubuntu 24.04 +- `1.13.1`, `1.13.1-py3.11-ubuntu24.04` +- `1.13.1-py3.10-ubuntu24.04` +- `1.13.1-py3.9-ubuntu24.04` +- `1.13.0`, `1.13.0-py3.11-ubuntu24.04` +- `1.13.0-py3.10-ubuntu24.04` +- `1.13.0-py3.9-ubuntu24.04` - `1.12.0`, `1.12.0-py3.11-ubuntu24.04` - `1.12.0-py3.10-ubuntu24.04` - `1.12.0-py3.9-ubuntu24.04` diff --git a/src/docker/superexec/README.md b/src/docker/superexec/README.md deleted file mode 100644 index 8026db18b978..000000000000 --- a/src/docker/superexec/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Flower SuperExec - -

- - Flower Website - -

- -## Quick reference - -- **Learn more:**
- [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) - -- **Where to get help:**
- [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) - -- **Supported architectures:**
- `amd64`, `arm64v8` - -## Supported tags - -- `unstable` - - points to the last successful build of the `main` branch -- `nightly`, `.dev` e.g. `1.13.0.dev20241014` - - uses Python 3.11 and Ubuntu 24.04 -- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` -- `1.12.0-py3.10-ubuntu24.04` -- `1.12.0-py3.9-ubuntu24.04` -- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` -- `1.11.1-py3.10-ubuntu22.04` -- `1.11.1-py3.9-ubuntu22.04` -- `1.11.1-py3.8-ubuntu22.04` -- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` -- `1.11.0-py3.10-ubuntu22.04` -- `1.11.0-py3.9-ubuntu22.04` -- `1.11.0-py3.8-ubuntu22.04` -- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` -- `1.10.0-py3.10-ubuntu22.04` -- `1.10.0-py3.9-ubuntu22.04` -- `1.10.0-py3.8-ubuntu22.04` diff --git a/src/docker/superlink/README.md b/src/docker/superlink/README.md index af03ce1c8054..47827ba6c0ef 100644 --- a/src/docker/superlink/README.md +++ b/src/docker/superlink/README.md @@ -21,8 +21,12 @@ - `unstable` - points to the last successful build of the `main` branch -- `nightly`, `.dev` e.g. `1.13.0.dev20241014` +- `nightly`, `.dev` e.g. `1.14.0.dev20241128` - uses Python 3.11 and Ubuntu 24.04 +- `1.13.1`, `1.13.1-py3.11-alpine3.19` +- `1.13.1-py3.11-ubuntu24.04` +- `1.13.0`, `1.13.0-py3.11-alpine3.19` +- `1.13.0-py3.11-ubuntu24.04` - `1.12.0`, `1.12.0-py3.11-alpine3.19` - `1.12.0-py3.11-ubuntu24.04` - `1.11.1`, `1.11.1-py3.11-alpine3.19` diff --git a/src/docker/supernode/README.md b/src/docker/supernode/README.md index 493f98cc78e4..69e5c69e1bec 100644 --- a/src/docker/supernode/README.md +++ b/src/docker/supernode/README.md @@ -21,8 +21,16 @@ - `unstable` - points to the last successful build of the `main` branch -- `nightly`, `.dev` e.g. `1.13.0.dev20241014` +- `nightly`, `.dev` e.g. 
`1.14.0.dev20241128` - uses Python 3.11 and Ubuntu 24.04 +- `1.13.1`, `1.13.1-py3.11-alpine3.19` +- `1.13.1-py3.11-ubuntu24.04` +- `1.13.1-py3.10-ubuntu24.04` +- `1.13.1-py3.9-ubuntu24.04` +- `1.13.0`, `1.13.0-py3.11-alpine3.19` +- `1.13.0-py3.11-ubuntu24.04` +- `1.13.0-py3.10-ubuntu24.04` +- `1.13.0-py3.9-ubuntu24.04` - `1.12.0`, `1.12.0-py3.11-alpine3.19` - `1.12.0-py3.11-ubuntu24.04` - `1.12.0-py3.10-ubuntu24.04` diff --git a/src/kotlin/gradle.properties b/src/kotlin/gradle.properties index c792dc1c822b..459c4ca9cf0e 100644 --- a/src/kotlin/gradle.properties +++ b/src/kotlin/gradle.properties @@ -30,7 +30,7 @@ POM_ARTIFACT_ID=flwr VERSION_NAME=0.0.2 POM_NAME=Flower Android -POM_DESCRIPTION=A Friendly Federated Learning Framework +POM_DESCRIPTION=A Friendly Federated AI Framework POM_INCEPTION_YEAR=2023 POM_URL=https://github.com/adap/flower/ diff --git a/src/proto/flwr/proto/exec.proto b/src/proto/flwr/proto/exec.proto index ad0723c0480c..583c42ff5704 100644 --- a/src/proto/flwr/proto/exec.proto +++ b/src/proto/flwr/proto/exec.proto @@ -19,6 +19,8 @@ package flwr.proto; import "flwr/proto/fab.proto"; import "flwr/proto/transport.proto"; +import "flwr/proto/recordset.proto"; +import "flwr/proto/run.proto"; service Exec { // Start run upon request @@ -26,13 +28,27 @@ service Exec { // Start log stream upon request rpc StreamLogs(StreamLogsRequest) returns (stream StreamLogsResponse) {} + + // flwr ls command + rpc ListRuns(ListRunsRequest) returns (ListRunsResponse) {} } message StartRunRequest { Fab fab = 1; map override_config = 2; - map federation_config = 3; + ConfigsRecord federation_options = 3; } message StartRunResponse { uint64 run_id = 1; } -message StreamLogsRequest { uint64 run_id = 1; } -message StreamLogsResponse { string log_output = 1; } +message StreamLogsRequest { + uint64 run_id = 1; + double after_timestamp = 2; +} +message StreamLogsResponse { + string log_output = 1; + double latest_timestamp = 2; +} +message ListRunsRequest { optional uint64 
run_id = 1; } +message ListRunsResponse { + map run_dict = 1; + string now = 2; +} diff --git a/src/proto/flwr/proto/log.proto b/src/proto/flwr/proto/log.proto new file mode 100644 index 000000000000..83ace518f7b5 --- /dev/null +++ b/src/proto/flwr/proto/log.proto @@ -0,0 +1,27 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/node.proto"; + +message PushLogsRequest { + Node node = 1; + uint64 run_id = 2; + repeated string logs = 3; +} +message PushLogsResponse {} diff --git a/src/proto/flwr/proto/message.proto b/src/proto/flwr/proto/message.proto index 7066da5b7e76..cbe4bf7e027f 100644 --- a/src/proto/flwr/proto/message.proto +++ b/src/proto/flwr/proto/message.proto @@ -28,10 +28,11 @@ message Message { } message Context { - uint64 node_id = 1; - map node_config = 2; - RecordSet state = 3; - map run_config = 4; + uint64 run_id = 1; + uint64 node_id = 2; + map node_config = 3; + RecordSet state = 4; + map run_config = 5; } message Metadata { diff --git a/src/proto/flwr/proto/run.proto b/src/proto/flwr/proto/run.proto index 4312e1127cc2..75bd0c8860d9 100644 --- a/src/proto/flwr/proto/run.proto +++ b/src/proto/flwr/proto/run.proto @@ -19,6 +19,7 @@ package flwr.proto; import "flwr/proto/fab.proto"; import "flwr/proto/node.proto"; +import 
"flwr/proto/recordset.proto"; import "flwr/proto/transport.proto"; message Run { @@ -27,6 +28,11 @@ message Run { string fab_version = 3; map override_config = 4; string fab_hash = 5; + string pending_at = 6; + string starting_at = 7; + string running_at = 8; + string finished_at = 9; + RunStatus status = 10; } message RunStatus { @@ -67,3 +73,7 @@ message GetRunStatusRequest { repeated uint64 run_ids = 2; } message GetRunStatusResponse { map run_status_dict = 1; } + +// Get Federation Options associated with run +message GetFederationOptionsRequest { uint64 run_id = 1; } +message GetFederationOptionsResponse { ConfigsRecord federation_options = 1; } diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/serverappio.proto similarity index 67% rename from src/proto/flwr/proto/driver.proto rename to src/proto/flwr/proto/serverappio.proto index e26003862a76..3d8d3d6aa0d6 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/serverappio.proto @@ -17,12 +17,14 @@ syntax = "proto3"; package flwr.proto; +import "flwr/proto/log.proto"; import "flwr/proto/node.proto"; +import "flwr/proto/message.proto"; import "flwr/proto/task.proto"; import "flwr/proto/run.proto"; import "flwr/proto/fab.proto"; -service Driver { +service ServerAppIo { // Request run_id rpc CreateRun(CreateRunRequest) returns (CreateRunResponse) {} @@ -40,6 +42,21 @@ service Driver { // Get FAB rpc GetFab(GetFabRequest) returns (GetFabResponse) {} + + // Pull ServerApp inputs + rpc PullServerAppInputs(PullServerAppInputsRequest) + returns (PullServerAppInputsResponse) {} + + // Push ServerApp outputs + rpc PushServerAppOutputs(PushServerAppOutputsRequest) + returns (PushServerAppOutputsResponse) {} + + // Update the status of a given run + rpc UpdateRunStatus(UpdateRunStatusRequest) + returns (UpdateRunStatusResponse) {} + + // Push ServerApp logs + rpc PushLogs(PushLogsRequest) returns (PushLogsResponse) {} } // GetNodes messages @@ -56,3 +73,18 @@ message 
PullTaskResRequest { repeated string task_ids = 2; } message PullTaskResResponse { repeated TaskRes task_res_list = 1; } + +// PullServerAppInputs messages +message PullServerAppInputsRequest {} +message PullServerAppInputsResponse { + Context context = 1; + Run run = 2; + Fab fab = 3; +} + +// PushServerAppOutputs messages +message PushServerAppOutputsRequest { + uint64 run_id = 1; + Context context = 2; +} +message PushServerAppOutputsResponse {} diff --git a/src/proto/flwr/proto/simulationio.proto b/src/proto/flwr/proto/simulationio.proto new file mode 100644 index 000000000000..9597e33cc65f --- /dev/null +++ b/src/proto/flwr/proto/simulationio.proto @@ -0,0 +1,59 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/log.proto"; +import "flwr/proto/message.proto"; +import "flwr/proto/run.proto"; +import "flwr/proto/fab.proto"; + +service SimulationIo { + // Pull Simulation inputs + rpc PullSimulationInputs(PullSimulationInputsRequest) + returns (PullSimulationInputsResponse) {} + + // Push Simulation outputs + rpc PushSimulationOutputs(PushSimulationOutputsRequest) + returns (PushSimulationOutputsResponse) {} + + // Update the status of a given run + rpc UpdateRunStatus(UpdateRunStatusRequest) + returns (UpdateRunStatusResponse) {} + + // Push ServerApp logs + rpc PushLogs(PushLogsRequest) returns (PushLogsResponse) {} + + // Get Federation Options + rpc GetFederationOptions(GetFederationOptionsRequest) + returns (GetFederationOptionsResponse) {} +} + +// PullSimulationInputs messages +message PullSimulationInputsRequest {} +message PullSimulationInputsResponse { + Context context = 1; + Run run = 2; + Fab fab = 3; +} + +// PushSimulationOutputs messages +message PushSimulationOutputsRequest { + uint64 run_id = 1; + Context context = 2; +} +message PushSimulationOutputsResponse {} diff --git a/src/py/flwr/cli/app.py b/src/py/flwr/cli/app.py index 8baccb4638fc..35bd21ebd55e 100644 --- a/src/py/flwr/cli/app.py +++ b/src/py/flwr/cli/app.py @@ -20,6 +20,7 @@ from .build import build from .install import install from .log import log +from .ls import ls from .new import new from .run import run @@ -37,6 +38,7 @@ app.command()(build) app.command()(install) app.command()(log) +app.command()(ls) typer_click_object = get_command(app) diff --git a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py index 4c9dca4ebcf1..e86f4bb762b3 100644 --- a/src/py/flwr/cli/build.py +++ b/src/py/flwr/cli/build.py @@ -81,8 +81,8 @@ def build( if not is_valid_project_name(app.name): typer.secho( f"❌ The project name {app.name} is invalid, " - "a valid 
project name must start with a letter or an underscore, " - "and can only contain letters, digits, and underscores.", + "a valid project name must start with a letter, " + "and can only contain letters, digits, and hyphens.", fg=typer.colors.RED, bold=True, ) diff --git a/src/py/flwr/cli/config_utils.py b/src/py/flwr/cli/config_utils.py index 73ce779c3b5c..40fa1e0d3b98 100644 --- a/src/py/flwr/cli/config_utils.py +++ b/src/py/flwr/cli/config_utils.py @@ -20,6 +20,7 @@ from typing import IO, Any, Optional, Union, get_args import tomli +import typer from flwr.common import object_ref from flwr.common.typing import UserConfigValue @@ -227,3 +228,99 @@ def load_from_string(toml_content: str) -> Optional[dict[str, Any]]: return data except tomli.TOMLDecodeError: return None + + +def validate_project_config( + config: Union[dict[str, Any], None], errors: list[str], warnings: list[str] +) -> dict[str, Any]: + """Validate and return the Flower project configuration.""" + if config is None: + typer.secho( + "Project configuration could not be loaded.\n" + "pyproject.toml is invalid:\n" + + "\n".join([f"- {line}" for line in errors]), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if warnings: + typer.secho( + "Project configuration is missing the following " + "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), + fg=typer.colors.RED, + bold=True, + ) + + typer.secho("Success", fg=typer.colors.GREEN) + + return config + + +def validate_federation_in_project_config( + federation: Optional[str], config: dict[str, Any] +) -> tuple[str, dict[str, Any]]: + """Validate the federation name in the Flower project configuration.""" + federation = federation or config["tool"]["flwr"]["federations"].get("default") + + if federation is None: + typer.secho( + "❌ No federation name was provided and the project's `pyproject.toml` " + "doesn't declare a default federation (with an Exec API address or an " + "`options.num-supernodes` value).", 
+ fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Validate the federation exists in the configuration + federation_config = config["tool"]["flwr"]["federations"].get(federation) + if federation_config is None: + available_feds = { + fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" + } + typer.secho( + f"❌ There is no `{federation}` federation declared in the " + "`pyproject.toml`.\n The following federations were found:\n\n" + + "\n".join(available_feds), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + return federation, federation_config + + +def validate_certificate_in_federation_config( + app: Path, federation_config: dict[str, Any] +) -> tuple[bool, Optional[bytes]]: + """Validate the certificates in the Flower project configuration.""" + insecure_str = federation_config.get("insecure") + if root_certificates := federation_config.get("root-certificates"): + root_certificates_bytes = (app / root_certificates).read_bytes() + if insecure := bool(insecure_str): + typer.secho( + "❌ `root_certificates` were provided but the `insecure` parameter " + "is set to `True`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + else: + root_certificates_bytes = None + if insecure_str is None: + typer.secho( + "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + if not (insecure := bool(insecure_str)): + typer.secho( + "❌ No certificate were given yet `insecure` is set to `False`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + return insecure, root_certificates_bytes diff --git a/src/py/flwr/cli/config_utils_test.py b/src/py/flwr/cli/config_utils_test.py index ddabc152bc0f..01003b25abed 100644 --- a/src/py/flwr/cli/config_utils_test.py +++ b/src/py/flwr/cli/config_utils_test.py @@ -17,9 +17,19 @@ import os import textwrap from pathlib import Path -from typing import Any +from typing 
import Any, Optional -from .config_utils import load, validate, validate_fields +import click +import pytest + +from .config_utils import ( + load, + validate, + validate_certificate_in_federation_config, + validate_federation_in_project_config, + validate_fields, + validate_project_config, +) def test_load_pyproject_toml_load_from_cwd(tmp_path: Path) -> None: @@ -328,3 +338,205 @@ def test_validate_pyproject_toml_fail() -> None: assert not is_valid assert len(errors) == 1 assert len(warnings) == 0 + + +def test_validate_project_config_fail() -> None: + """Test that validate_project_config fails correctly.""" + # Prepare + config = None + errors = ["Error"] + warnings = ["Warning"] + + # Execute + with pytest.raises(click.exceptions.Exit) as excinfo: + _ = validate_project_config(config, errors, warnings) + + # Assert + assert excinfo.value.exit_code == 1 + + +def test_validate_federation_in_project_config() -> None: + """Test that validate_federation_in_config succeeds correctly.""" + # Prepare - Test federation is None + config: dict[str, Any] = { + "project": { + "name": "fedgpt", + "version": "1.0.0", + "description": "", + "license": "", + "authors": [], + }, + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "flwr.cli.run:run", + "clientapp": "flwr.cli.run:run", + }, + }, + "federations": { + "default": "default_federation", + "default_federation": {"default_key": "default_val"}, + }, + }, + }, + } + federation = None + + # Execute + federation, federation_config = validate_federation_in_project_config( + federation, config + ) + + # Assert + assert federation == "default_federation" + assert federation_config == {"default_key": "default_val"} + + federation = "new_federation" + config["tool"]["flwr"]["federations"]["new_federation"] = {"new_key": "new_val"} + + # Execute + federation, federation_config = validate_federation_in_project_config( + federation, config + ) + + # Assert + assert federation == 
"new_federation" + assert federation_config == {"new_key": "new_val"} + + +def test_validate_federation_in_project_config_fail() -> None: + """Test that validate_federation_in_config fails correctly.""" + + def run_and_assert_exit(federation: Optional[str], config: dict[str, Any]) -> None: + """Execute validation and assert exit code is 1.""" + with pytest.raises(click.exceptions.Exit) as excinfo: + validate_federation_in_project_config(federation, config) + assert excinfo.value.exit_code == 1 + + # Prepare + config: dict[str, Any] = { + "project": { + "name": "fedgpt", + "version": "1.0.0", + "description": "", + "license": "", + "authors": [], + }, + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "flwr.cli.run:run", + "clientapp": "flwr.cli.run:run", + }, + }, + "federations": {}, + }, + }, + } + federation = None + + # Test federation is None and no default federation is declared + # Execute and assert + run_and_assert_exit(federation, config) + + # Prepare - Test federation name is not in config + federation = "fed_not_in_config" + config["tool"]["flwr"]["federations"] = {"fed_in_config": {}} + + # Execute and assert + run_and_assert_exit(federation, config) + + +def test_validate_certificate_in_federation_config(tmp_path: Path) -> None: + """Test that validate_certificate_in_federation_config succeeds correctly.""" + # Prepare + config: dict[str, Any] = { + "address": "127.0.0.1:9091", + "root-certificates": "dummy_cert.pem", + } + dummy_cert = tmp_path / "dummy_cert.pem" + dummy_cert.write_text("dummy_cert") + + # Current directory + origin = Path.cwd() + + try: + # Change into the temporary directory + os.chdir(tmp_path) + + # Test insecure is not declared and root_certificates is present + # Execute + insecure, root_cert = validate_certificate_in_federation_config( + tmp_path, config + ) + # Assert + assert not insecure + assert root_cert == b"dummy_cert" + + # Test insecure is False and root_certificates is 
present + config["insecure"] = False + # Execute + insecure, root_cert = validate_certificate_in_federation_config( + tmp_path, config + ) + # Assert + assert not insecure + assert root_cert == b"dummy_cert" + + # Test insecure is True and root_certificates is None + config["insecure"] = True + config.pop("root-certificates") + + # Execute + insecure, root_cert = validate_certificate_in_federation_config( + tmp_path, config + ) + # Assert + assert insecure + assert root_cert is None + finally: + os.chdir(origin) + + +def test_validate_certificate_in_federation_config_fail(tmp_path: Path) -> None: + """Test that validate_certificate_in_federation_config fails correctly.""" + + def run_and_assert_exit(app: Path, config: dict[str, Any]) -> None: + """Execute validation and assert exit code is 1.""" + with pytest.raises(click.exceptions.Exit) as excinfo: + validate_certificate_in_federation_config(app, config) + assert excinfo.value.exit_code == 1 + + # Prepare + config: dict[str, Any] = {"address": "localhost:8080"} + dummy_cert = tmp_path / "dummy_cert.pem" + dummy_cert.write_text("dummy_cert") + + # Current directory + origin = Path.cwd() + + try: + # Change into the temporary directory + os.chdir(tmp_path) + + # Test insecure is None and root_certificates is None + config["insecure"] = None + # Execute and assert + run_and_assert_exit(tmp_path, config) + + # Test insecure is False, but root_certificates is None + config["insecure"] = False + # Execute and assert + run_and_assert_exit(tmp_path, config) + + # Test insecure is True, but root_certificates is not None + config["root-certificates"] = "dummy_cert.pem" + config["insecure"] = True + # Execute and assert + run_and_assert_exit(tmp_path, config) + finally: + os.chdir(origin) diff --git a/src/py/flwr/cli/install.py b/src/py/flwr/cli/install.py index 7451aa3d2326..c2dd7b1585fd 100644 --- a/src/py/flwr/cli/install.py +++ b/src/py/flwr/cli/install.py @@ -16,7 +16,6 @@ import hashlib import shutil -import 
subprocess import tempfile import zipfile from io import BytesIO @@ -188,21 +187,6 @@ def validate_and_install( else: shutil.copy2(item, install_dir / item.name) - try: - subprocess.run( - ["pip", "install", "-e", install_dir, "--no-deps"], - capture_output=True, - text=True, - check=True, - ) - except subprocess.CalledProcessError as e: - typer.secho( - f"❌ Failed to `pip install` package(s) from {install_dir}:\n{e.stderr}", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) from e - typer.secho( f"🎊 Successfully installed {project_name} to {install_dir}.", fg=typer.colors.GREEN, diff --git a/src/py/flwr/cli/log.py b/src/py/flwr/cli/log.py index 7199cefce4f7..5ce3559236e0 100644 --- a/src/py/flwr/cli/log.py +++ b/src/py/flwr/cli/log.py @@ -14,33 +14,38 @@ # ============================================================================== """Flower command line interface `log` command.""" -import sys import time from logging import DEBUG, ERROR, INFO from pathlib import Path -from typing import Annotated, Optional +from typing import Annotated, Any, Optional, cast import grpc import typer -from flwr.cli.config_utils import load_and_validate +from flwr.cli.config_utils import ( + load_and_validate, + validate_certificate_in_federation_config, + validate_federation_in_project_config, + validate_project_config, +) +from flwr.common.constant import CONN_RECONNECT_INTERVAL, CONN_REFRESH_PERIOD from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel from flwr.common.logger import log as logger from flwr.proto.exec_pb2 import StreamLogsRequest # pylint: disable=E0611 from flwr.proto.exec_pb2_grpc import ExecStub -CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) - def start_stream( run_id: int, channel: grpc.Channel, refresh_period: int = CONN_REFRESH_PERIOD ) -> None: """Start log streaming for a given run ID.""" + stub = ExecStub(channel) + after_timestamp = 0.0 try: + logger(INFO, "Starting logstream for run_id 
`%s`", run_id) while True: - logger(INFO, "Starting logstream for run_id `%s`", run_id) - stream_logs(run_id, channel, refresh_period) - time.sleep(2) + after_timestamp = stream_logs(run_id, stub, refresh_period, after_timestamp) + time.sleep(CONN_RECONNECT_INTERVAL) logger(DEBUG, "Reconnecting to logstream") except KeyboardInterrupt: logger(INFO, "Exiting logstream") @@ -54,16 +59,44 @@ def start_stream( channel.close() -def stream_logs(run_id: int, channel: grpc.Channel, duration: int) -> None: - """Stream logs from the beginning of a run with connection refresh.""" - start_time = time.time() - stub = ExecStub(channel) - req = StreamLogsRequest(run_id=run_id) +def stream_logs( + run_id: int, stub: ExecStub, duration: int, after_timestamp: float +) -> float: + """Stream logs from the beginning of a run with connection refresh. + + Parameters + ---------- + run_id : int + The identifier of the run. + stub : ExecStub + The gRPC stub to interact with the Exec service. + duration : int + The timeout duration for each stream connection in seconds. + after_timestamp : float + The timestamp to start streaming logs from. + + Returns + ------- + float + The latest timestamp from the streamed logs or the provided `after_timestamp` + if no logs are returned. 
+ """ + req = StreamLogsRequest(run_id=run_id, after_timestamp=after_timestamp) + + latest_timestamp = 0.0 + res = None + try: + for res in stub.StreamLogs(req, timeout=duration): + print(res.log_output, end="") + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() != grpc.StatusCode.DEADLINE_EXCEEDED: + raise e + finally: + if res is not None: + latest_timestamp = cast(float, res.latest_timestamp) - for res in stub.StreamLogs(req): - print(res.log_output) - if time.time() - start_time > duration: - break + return max(latest_timestamp, after_timestamp) def print_logs(run_id: int, channel: grpc.Channel, timeout: int) -> None: @@ -124,100 +157,33 @@ def log( pyproject_path = app / "pyproject.toml" if app else None config, errors, warnings = load_and_validate(path=pyproject_path) - - if config is None: - typer.secho( - "Project configuration could not be loaded.\n" - "pyproject.toml is invalid:\n" - + "\n".join([f"- {line}" for line in errors]), - fg=typer.colors.RED, - bold=True, - ) - sys.exit() - - if warnings: - typer.secho( - "Project configuration is missing the following " - "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), - fg=typer.colors.RED, - bold=True, - ) - - typer.secho("Success", fg=typer.colors.GREEN) - - federation = federation or config["tool"]["flwr"]["federations"].get("default") - - if federation is None: - typer.secho( - "❌ No federation name was provided and the project's `pyproject.toml` " - "doesn't declare a default federation (with a SuperExec address or an " - "`options.num-supernodes` value).", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - - # Validate the federation exists in the configuration - federation_config = config["tool"]["flwr"]["federations"].get(federation) - if federation_config is None: - available_feds = { - fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" - } - typer.secho( - f"❌ There is no `{federation}` federation declared in the " 
- "`pyproject.toml`.\n The following federations were found:\n\n" - + "\n".join(available_feds), - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) + config = validate_project_config(config, errors, warnings) + federation, federation_config = validate_federation_in_project_config( + federation, config + ) if "address" not in federation_config: typer.secho( - "❌ `flwr log` currently works with `SuperExec`. Ensure that the correct" - "`SuperExec` address is provided in the `pyproject.toml`.", + "❌ `flwr log` currently works with Exec API. Ensure that the correct" + "Exec API address is provided in the `pyproject.toml`.", fg=typer.colors.RED, bold=True, ) raise typer.Exit(code=1) - _log_with_superexec(federation_config, run_id, stream) + _log_with_exec_api(app, federation_config, run_id, stream) -# pylint: disable-next=too-many-branches -def _log_with_superexec( - federation_config: dict[str, str], +def _log_with_exec_api( + app: Path, + federation_config: dict[str, Any], run_id: int, stream: bool, ) -> None: - insecure_str = federation_config.get("insecure") - if root_certificates := federation_config.get("root-certificates"): - root_certificates_bytes = Path(root_certificates).read_bytes() - if insecure := bool(insecure_str): - typer.secho( - "❌ `root_certificates` were provided but the `insecure` parameter" - "is set to `True`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - else: - root_certificates_bytes = None - if insecure_str is None: - typer.secho( - "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - if not (insecure := bool(insecure_str)): - typer.secho( - "❌ No certificate were given yet `insecure` is set to `False`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) + insecure, root_certificates_bytes = validate_certificate_in_federation_config( + app, federation_config + ) channel = create_channel( 
server_address=federation_config["address"], insecure=insecure, diff --git a/src/py/flwr/cli/log_test.py b/src/py/flwr/cli/log_test.py index 932610bea2f3..d1c86559eec8 100644 --- a/src/py/flwr/cli/log_test.py +++ b/src/py/flwr/cli/log_test.py @@ -32,6 +32,11 @@ def log_output(self) -> NoReturn: """Raise KeyboardInterrupt to exit logstream test gracefully.""" raise KeyboardInterrupt + @property + def latest_timestamp(self) -> NoReturn: + """Raise KeyboardInterrupt to exit logstream test gracefully.""" + raise KeyboardInterrupt + class TestFlwrLog(unittest.TestCase): """Unit tests for `flwr log` CLI functions.""" @@ -66,7 +71,9 @@ def test_flwr_log_stream_method(self) -> None: """Test stream_logs.""" with patch("builtins.print") as mock_print: with self.assertRaises(KeyboardInterrupt): - stream_logs(run_id=123, channel=self.mock_channel, duration=1) + stream_logs( + run_id=123, stub=self.mock_stub, duration=1, after_timestamp=0.0 + ) # Assert that mock print was called with the expected arguments mock_print.assert_has_calls(self.expected_calls) diff --git a/src/py/flwr/cli/ls.py b/src/py/flwr/cli/ls.py new file mode 100644 index 000000000000..54173f5f6018 --- /dev/null +++ b/src/py/flwr/cli/ls.py @@ -0,0 +1,228 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower command line interface `ls` command.""" + + +from datetime import datetime, timedelta +from logging import DEBUG +from pathlib import Path +from typing import Annotated, Any, Optional + +import grpc +import typer +from rich.console import Console +from rich.table import Table +from rich.text import Text + +from flwr.cli.config_utils import ( + load_and_validate, + validate_certificate_in_federation_config, + validate_federation_in_project_config, + validate_project_config, +) +from flwr.common.constant import FAB_CONFIG_FILE, SubStatus +from flwr.common.date import format_timedelta, isoformat8601_utc +from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel +from flwr.common.logger import log +from flwr.common.serde import run_from_proto +from flwr.common.typing import Run +from flwr.proto.exec_pb2 import ( # pylint: disable=E0611 + ListRunsRequest, + ListRunsResponse, +) +from flwr.proto.exec_pb2_grpc import ExecStub + + +def ls( + app: Annotated[ + Path, + typer.Argument(help="Path of the Flower project"), + ] = Path("."), + federation: Annotated[ + Optional[str], + typer.Argument(help="Name of the federation"), + ] = None, + runs: Annotated[ + bool, + typer.Option( + "--runs", + help="List all runs", + ), + ] = False, + run_id: Annotated[ + Optional[int], + typer.Option( + "--run-id", + help="Specific run ID to display", + ), + ] = None, +) -> None: + """List runs.""" + # Load and validate federation config + typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) + + pyproject_path = app / FAB_CONFIG_FILE if app else None + config, errors, warnings = load_and_validate(path=pyproject_path) + config = validate_project_config(config, errors, warnings) + federation, federation_config = validate_federation_in_project_config( + federation, config + ) + + if "address" not in federation_config: + typer.secho( + "❌ `flwr ls` currently works with Exec API. Ensure that the correct" + "Exec API address is provided in the `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + try: + if runs and run_id is not None: + raise ValueError( + "The options '--runs' and '--run-id' are mutually exclusive." + ) + + channel = _init_channel(app, federation_config) + stub = ExecStub(channel) + + # Display information about a specific run ID + if run_id is not None: + typer.echo(f"🔍 Displaying information for run ID {run_id}...") + _display_one_run(stub, run_id) + # By default, list all runs + else: + typer.echo("📄 Listing all runs...") + _list_runs(stub) + + except ValueError as err: + typer.secho( + f"❌ {err}", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) from err + finally: + channel.close() + + +def on_channel_state_change(channel_connectivity: str) -> None: + """Log channel connectivity.""" + log(DEBUG, channel_connectivity) + + +def _init_channel(app: Path, federation_config: dict[str, Any]) -> grpc.Channel: + """Initialize gRPC channel to the Exec API.""" + insecure, root_certificates_bytes = validate_certificate_in_federation_config( + app, federation_config + ) + channel = create_channel( + server_address=federation_config["address"], + insecure=insecure, + root_certificates=root_certificates_bytes, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + interceptors=None, + ) + channel.subscribe(on_channel_state_change) + return channel + + +def _format_run_table(run_dict: dict[int, Run], now_isoformat: str) -> Table: + """Format run status as a rich Table.""" + table 
= Table(header_style="bold cyan", show_lines=True) + + def _format_datetime(dt: Optional[datetime]) -> str: + return isoformat8601_utc(dt).replace("T", " ") if dt else "N/A" + + # Add columns + table.add_column( + Text("Run ID", justify="center"), style="bright_white", overflow="fold" + ) + table.add_column(Text("FAB", justify="center"), style="dim white") + table.add_column(Text("Status", justify="center")) + table.add_column(Text("Elapsed", justify="center"), style="blue") + table.add_column(Text("Created At", justify="center"), style="dim white") + table.add_column(Text("Running At", justify="center"), style="dim white") + table.add_column(Text("Finished At", justify="center"), style="dim white") + + # Add rows + for run in sorted( + run_dict.values(), key=lambda x: datetime.fromisoformat(x.pending_at) + ): + # Combine status and sub-status into a single string + if run.status.sub_status == "": + status_text = run.status.status + else: + status_text = f"{run.status.status}:{run.status.sub_status}" + + # Style the status based on its value + sub_status = run.status.sub_status + if sub_status == SubStatus.COMPLETED: + status_style = "green" + elif sub_status == SubStatus.FAILED: + status_style = "red" + else: + status_style = "yellow" + + # Convert isoformat to datetime + pending_at = datetime.fromisoformat(run.pending_at) if run.pending_at else None + running_at = datetime.fromisoformat(run.running_at) if run.running_at else None + finished_at = ( + datetime.fromisoformat(run.finished_at) if run.finished_at else None + ) + + # Calculate elapsed time + elapsed_time = timedelta() + if running_at: + if finished_at: + end_time = finished_at + else: + end_time = datetime.fromisoformat(now_isoformat) + elapsed_time = end_time - running_at + + table.add_row( + f"[bold]{run.run_id}[/bold]", + f"{run.fab_id} (v{run.fab_version})", + f"[{status_style}]{status_text}[/{status_style}]", + format_timedelta(elapsed_time), + _format_datetime(pending_at), + 
_format_datetime(running_at), + _format_datetime(finished_at), + ) + return table + + +def _list_runs( + stub: ExecStub, +) -> None: + """List all runs.""" + res: ListRunsResponse = stub.ListRuns(ListRunsRequest()) + run_dict = {run_id: run_from_proto(proto) for run_id, proto in res.run_dict.items()} + + Console().print(_format_run_table(run_dict, res.now)) + + +def _display_one_run( + stub: ExecStub, + run_id: int, +) -> None: + """Display information about a specific run.""" + res: ListRunsResponse = stub.ListRuns(ListRunsRequest(run_id=run_id)) + if not res.run_dict: + raise ValueError(f"Run ID {run_id} not found") + + run_dict = {run_id: run_from_proto(proto) for run_id, proto in res.run_dict.items()} + + Console().print(_format_run_table(run_dict, res.now)) diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 3cbde991ff6e..f96753a37cae 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -268,20 +268,30 @@ def new( context=context, ) - print( - typer.style( - "🎊 Flower App creation successful.\n\n" - "Use the following command to run your Flower App:\n", - fg=typer.colors.GREEN, - bold=True, - ) + prompt = typer.style( + "🎊 Flower App creation successful.\n\n" + "To run your Flower App, use the following command:\n\n", + fg=typer.colors.GREEN, + bold=True, ) _add = " huggingface-cli login\n" if llm_challenge_str else "" - print( - typer.style( - f" cd {package_name}\n" + " pip install -e .\n" + _add + " flwr run\n", - fg=typer.colors.BRIGHT_CYAN, - bold=True, - ) + prompt += typer.style( + _add + f" flwr run {package_name}\n\n", + fg=typer.colors.BRIGHT_CYAN, + bold=True, + ) + + prompt += typer.style( + "If you haven't installed all dependencies yet, follow these steps:\n\n", + fg=typer.colors.GREEN, + bold=True, ) + + prompt += typer.style( + f" cd {package_name}\n" + " pip install -e .\n" + _add + " flwr run .\n", + fg=typer.colors.BRIGHT_CYAN, + bold=True, + ) + + print(prompt) diff --git 
a/src/py/flwr/cli/new/templates/app/README.md.tpl b/src/py/flwr/cli/new/templates/app/README.md.tpl index 32e95fc0763d..93bc462257d1 100644 --- a/src/py/flwr/cli/new/templates/app/README.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.md.tpl @@ -14,7 +14,18 @@ In the `$project_name` directory, use `flwr run` to run a local simulation: flwr run . ``` +Refer to the [How to Run Simulations](https://flower.ai/docs/framework/how-to-run-simulations.html) guide in the documentation for advice on how to optimize your simulations. + ## Run with the Deployment Engine > \[!NOTE\] > An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. + +## Resources + +- Flower website: [flower.ai](https://flower.ai/) +- Check the documentation: [flower.ai/docs](https://flower.ai/docs/) +- Give Flower a ⭐️ on GitHub: [GitHub](https://github.com/adap/flower) +- Join the Flower community! + - [Flower Slack](https://flower.ai/join-slack/) + - [Flower Discuss](https://discuss.flower.ai/) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl index 41381ef7c7a3..21c163f4f088 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl @@ -71,7 +71,7 @@ def load_data(partition_id: int, num_partitions: int, dataset_name: str): partitioners={"train": partitioner}, ) client_trainset = FDS.load_partition(partition_id, "train") - client_trainset = reformat(client_trainset, llm_task="generalnlp") + client_trainset = reformat(client_trainset, llm_task="$llm_challenge_str") return client_trainset diff --git a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl index c70580009392..cdf340b030ee 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +++ 
b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl index d34985d50433..9d7c1bdf1292 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl @@ -8,8 +8,9 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets>=0.3.0", + "torch==2.3.1", "trl==0.8.1", "bitsandbytes==0.43.0", "scipy==1.13.0", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl index 3515cbd69d17..d46c1c68e097 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets>=0.3.0", "torch==2.2.1", "transformers>=4.30.0,<5.0", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl index 7c55d3654a08..70c02a6b068b 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "jax==0.4.30", "jaxlib==0.4.30", "scikit-learn==1.3.2", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl 
b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl index 9ea11ff3fc0c..5d4cc9987fb5 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "mlx==0.16.1", "numpy==1.24.4", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl index 9f8f3aaab554..a82d5c137b83 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "numpy>=1.21.0", ] diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index fe5ac7735d66..e8c3f9fe57e8 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl index d5fec5f2f93f..23066f9f46ec 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.1.1", ] diff --git 
a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl index 81a839b30998..b37353c8f63d 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -8,9 +8,9 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.12.0", + "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", - "tensorflow>=2.11.1", + "tensorflow>=2.11.1,<2.18.0", ] [tool.hatch.build.targets.wheel] diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index 4722effee53d..1b144379581e 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -16,7 +16,6 @@ import json import subprocess -import sys from logging import DEBUG from pathlib import Path from typing import Annotated, Any, Optional @@ -24,11 +23,24 @@ import typer from flwr.cli.build import build -from flwr.cli.config_utils import load_and_validate -from flwr.common.config import flatten_dict, parse_config_args +from flwr.cli.config_utils import ( + load_and_validate, + validate_certificate_in_federation_config, + validate_federation_in_project_config, + validate_project_config, +) +from flwr.common.config import ( + flatten_dict, + parse_config_args, + user_config_to_configsrecord, +) from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel from flwr.common.logger import log -from flwr.common.serde import fab_to_proto, user_config_to_proto +from flwr.common.serde import ( + configs_record_to_proto, + fab_to_proto, + user_config_to_proto, +) from flwr.common.typing import Fab from flwr.proto.exec_pb2 import StartRunRequest # pylint: disable=E0611 from flwr.proto.exec_pb2_grpc import ExecStub @@ -79,96 +91,28 @@ def run( pyproject_path = app / "pyproject.toml" if app else None config, errors, warnings = load_and_validate(path=pyproject_path) - - if config is None: - typer.secho( - 
"Project configuration could not be loaded.\n" - "pyproject.toml is invalid:\n" - + "\n".join([f"- {line}" for line in errors]), - fg=typer.colors.RED, - bold=True, - ) - sys.exit() - - if warnings: - typer.secho( - "Project configuration is missing the following " - "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), - fg=typer.colors.RED, - bold=True, - ) - - typer.secho("Success", fg=typer.colors.GREEN) - - federation = federation or config["tool"]["flwr"]["federations"].get("default") - - if federation is None: - typer.secho( - "❌ No federation name was provided and the project's `pyproject.toml` " - "doesn't declare a default federation (with a SuperExec address or an " - "`options.num-supernodes` value).", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - - # Validate the federation exists in the configuration - federation_config = config["tool"]["flwr"]["federations"].get(federation) - if federation_config is None: - available_feds = { - fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" - } - typer.secho( - f"❌ There is no `{federation}` federation declared in " - "`pyproject.toml`.\n The following federations were found:\n\n" - + "\n".join(available_feds), - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) + config = validate_project_config(config, errors, warnings) + federation, federation_config = validate_federation_in_project_config( + federation, config + ) if "address" in federation_config: - _run_with_superexec(app, federation_config, config_overrides, stream) + _run_with_exec_api(app, federation_config, config_overrides, stream) else: - _run_without_superexec(app, federation_config, config_overrides, federation) + _run_without_exec_api(app, federation_config, config_overrides, federation) -# pylint: disable=too-many-locals -def _run_with_superexec( +# pylint: disable-next=too-many-locals +def _run_with_exec_api( app: Path, federation_config: dict[str, Any], 
config_overrides: Optional[list[str]], stream: bool, ) -> None: - insecure_str = federation_config.get("insecure") - if root_certificates := federation_config.get("root-certificates"): - root_certificates_bytes = (app / root_certificates).read_bytes() - if insecure := bool(insecure_str): - typer.secho( - "❌ `root_certificates` were provided but the `insecure` parameter" - "is set to `True`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - else: - root_certificates_bytes = None - if insecure_str is None: - typer.secho( - "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - if not (insecure := bool(insecure_str)): - typer.secho( - "❌ No certificate were given yet `insecure` is set to `False`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) - + insecure, root_certificates_bytes = validate_certificate_in_federation_config( + app, federation_config + ) channel = create_channel( server_address=federation_config["address"], insecure=insecure, @@ -181,26 +125,30 @@ def _run_with_superexec( fab_path, fab_hash = build(app) content = Path(fab_path).read_bytes() + + # Delete FAB file once the bytes is computed + Path(fab_path).unlink() + fab = Fab(fab_hash, content) + # Construct a `ConfigsRecord` out of a flattened `UserConfig` + fed_conf = flatten_dict(federation_config.get("options", {})) + c_record = user_config_to_configsrecord(fed_conf) + req = StartRunRequest( fab=fab_to_proto(fab), override_config=user_config_to_proto(parse_config_args(config_overrides)), - federation_config=user_config_to_proto( - flatten_dict(federation_config.get("options")) - ), + federation_options=configs_record_to_proto(c_record), ) res = stub.StartRun(req) - # Delete FAB file once it has been sent to the SuperExec - Path(fab_path).unlink() typer.secho(f"🎊 Successfully started run {res.run_id}", fg=typer.colors.GREEN) if stream: start_stream(res.run_id, channel, 
CONN_REFRESH_PERIOD) -def _run_without_superexec( +def _run_without_exec_api( app: Optional[Path], federation_config: dict[str, Any], config_overrides: Optional[list[str]], diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index fdb62578292a..7046e3f2e4fc 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -32,13 +32,19 @@ from flwr.cli.install import install_from_fab from flwr.client.client import Client from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.nodestate.nodestate_factory import NodeStateFactory from flwr.client.typing import ClientFnExt from flwr.common import GRPC_MAX_MESSAGE_LENGTH, Context, EventType, Message, event from flwr.common.address import parse_address from flwr.common.constant import ( - CLIENTAPPIO_API_DEFAULT_ADDRESS, + CLIENT_OCTET, + CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS, + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, + MAX_RETRY_DELAY, MISSING_EXTRA_REST, RUN_ID_NUM_BYTES, + SERVER_OCTET, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_BIDI, TRANSPORT_TYPE_GRPC_RERE, @@ -52,18 +58,15 @@ from flwr.common.typing import Fab, Run, UserConfig from flwr.proto.clientappio_pb2_grpc import add_ClientAppIoServicer_to_server from flwr.server.superlink.fleet.grpc_bidi.grpc_server import generic_create_grpc_server -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .clientapp.clientappio_servicer import ClientAppInputs, ClientAppIoServicer from .grpc_adapter_client.connection import grpc_adapter from .grpc_client.connection import grpc_connection from .grpc_rere_client.connection import grpc_request_response from .message_handler.message_handler import handle_control_message -from .node_state import NodeState from .numpy_client import NumPyClient - -ISOLATION_MODE_SUBPROCESS = "subprocess" -ISOLATION_MODE_PROCESS = "process" +from .run_info_store import 
DeprecatedRunInfoStore def _check_actionable_client( @@ -102,6 +105,11 @@ def start_client( ) -> None: """Start a Flower client node which connects to a Flower server. + Warning + ------- + This function is deprecated since 1.13.0. Use :code:`flower-supernode` command + instead to start a SuperNode. + Parameters ---------- server_address : str @@ -176,6 +184,17 @@ class `flwr.client.Client` (default: None) >>> root_certificates=Path("/crts/root.pem").read_bytes(), >>> ) """ + msg = ( + "flwr.client.start_client() is deprecated." + "\n\tInstead, use the `flower-supernode` CLI command to start a SuperNode " + "as shown below:" + "\n\n\t\t$ flower-supernode --insecure --superlink=':'" + "\n\n\tTo view all available options, run:" + "\n\n\t\t$ flower-supernode --help" + "\n\n\tUsing `start_client()` is deprecated." + ) + warn_deprecated_feature(name=msg) + event(EventType.START_CLIENT_ENTER) start_client_internal( server_address=server_address, @@ -216,7 +235,7 @@ def start_client_internal( max_wait_time: Optional[float] = None, flwr_path: Optional[Path] = None, isolation: Optional[str] = None, - supernode_address: Optional[str] = CLIENTAPPIO_API_DEFAULT_ADDRESS, + clientappio_api_address: Optional[str] = CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS, ) -> None: """Start a Flower client node which connects to a Flower server. @@ -274,9 +293,11 @@ class `flwr.client.Client` (default: None) `process`. Defaults to `None`, which runs the `ClientApp` in the same process as the SuperNode. If `subprocess`, the `ClientApp` runs in a subprocess started by the SueprNode and communicates using gRPC at the address - `supernode_address`. If `process`, the `ClientApp` runs in a separate isolated - process and communicates using gRPC at the address `supernode_address`. - supernode_address : Optional[str] (default: `CLIENTAPPIO_API_DEFAULT_ADDRESS`) + `clientappio_api_address`. 
If `process`, the `ClientApp` runs in a separate + isolated process and communicates using gRPC at the address + `clientappio_api_address`. + clientappio_api_address : Optional[str] + (default: `CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS`) The SuperNode gRPC server address. """ if insecure is None: @@ -304,15 +325,16 @@ def _load_client_app(_1: str, _2: str, _3: str) -> ClientApp: load_client_app_fn = _load_client_app if isolation: - if supernode_address is None: + if clientappio_api_address is None: raise ValueError( - f"`supernode_address` required when `isolation` is " + f"`clientappio_api_address` required when `isolation` is " f"{ISOLATION_MODE_SUBPROCESS} or {ISOLATION_MODE_PROCESS}", ) _clientappio_grpc_server, clientappio_servicer = run_clientappio_api_grpc( - address=supernode_address + address=clientappio_api_address, + certificates=None, ) - supernode_address = cast(str, supernode_address) + clientappio_api_address = cast(str, clientappio_api_address) # At this point, only `load_client_app_fn` should be used # Both `client` and `client_fn` must not be used directly @@ -346,7 +368,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) retry_invoker = RetryInvoker( - wait_gen_factory=exponential, + wait_gen_factory=lambda: exponential(max_delay=MAX_RETRY_DELAY), recoverable_exceptions=connection_error_type, max_tries=max_retries + 1 if max_retries is not None else None, max_time=max_wait_time, @@ -364,8 +386,10 @@ def _on_backoff(retry_state: RetryState) -> None: on_backoff=_on_backoff, ) - # NodeState gets initialized when the first connection is established - node_state: Optional[NodeState] = None + # DeprecatedRunInfoStore gets initialized when the first connection is established + run_info_store: Optional[DeprecatedRunInfoStore] = None + state_factory = NodeStateFactory() + state = state_factory.state() runs: dict[int, Run] = {} @@ -382,7 +406,7 @@ def _on_backoff(retry_state: RetryState) -> None: receive, send, create_node, delete_node, get_run, get_fab 
= conn # Register node when connecting the first time - if node_state is None: + if run_info_store is None: if create_node is None: if transport not in ["grpc-bidi", None]: raise NotImplementedError( @@ -391,19 +415,20 @@ def _on_backoff(retry_state: RetryState) -> None: ) # gRPC-bidi doesn't have the concept of node_id, # so we set it to -1 - node_state = NodeState( + run_info_store = DeprecatedRunInfoStore( node_id=-1, node_config={}, ) else: # Call create_node fn to register node - node_id: Optional[int] = ( # pylint: disable=assignment-from-none - create_node() - ) # pylint: disable=not-callable - if node_id is None: - raise ValueError("Node registration failed") - node_state = NodeState( - node_id=node_id, + # and store node_id in state + if (node_id := create_node()) is None: + raise ValueError( + "Failed to register SuperNode with the SuperLink" + ) + state.set_node_id(node_id) + run_info_store = DeprecatedRunInfoStore( + node_id=state.get_node_id(), node_config=node_config, ) @@ -445,7 +470,7 @@ def _on_backoff(retry_state: RetryState) -> None: runs[run_id] = get_run(run_id) # If get_run is None, i.e., in grpc-bidi mode else: - runs[run_id] = Run(run_id, "", "", "", {}) + runs[run_id] = Run.create_empty(run_id=run_id) run: Run = runs[run_id] if get_fab is not None and run.fab_hash: @@ -461,7 +486,7 @@ def _on_backoff(retry_state: RetryState) -> None: run.fab_id, run.fab_version = fab_id, fab_version # Register context for this run - node_state.register_context( + run_info_store.register_context( run_id=run_id, run=run, flwr_path=flwr_path, @@ -469,7 +494,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) # Retrieve context for this run - context = node_state.retrieve_context(run_id=run_id) + context = run_info_store.retrieve_context(run_id=run_id) # Create an error reply message that will never be used to prevent # the used-before-assignment linting error reply_message = message.create_error_reply( @@ -505,14 +530,24 @@ def _on_backoff(retry_state: 
RetryState) -> None: ) if start_subprocess: + _octet, _colon, _port = ( + clientappio_api_address.rpartition(":") + ) + io_address = ( + f"{CLIENT_OCTET}:{_port}" + if _octet == SERVER_OCTET + else clientappio_api_address + ) # Start ClientApp subprocess command = [ "flwr-clientapp", - "--supernode", - supernode_address, + "--clientappio-api-address", + io_address, "--token", str(token), ] + command.append("--insecure") + subprocess.run( command, stdout=None, @@ -542,7 +577,7 @@ def _on_backoff(retry_state: RetryState) -> None: # Raise exception, crash process raise ex - # Don't update/change NodeState + # Don't update/change DeprecatedRunInfoStore e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION # Ex fmt: ":<'division by zero'>" @@ -567,7 +602,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) else: # No exception, update node state - node_state.update_context( + run_info_store.update_context( run_id=run_id, context=context, ) @@ -780,7 +815,10 @@ def signal_handler(sig, frame): # type: ignore signal.signal(signal.SIGTERM, signal_handler) -def run_clientappio_api_grpc(address: str) -> tuple[grpc.Server, ClientAppIoServicer]: +def run_clientappio_api_grpc( + address: str, + certificates: Optional[tuple[bytes, bytes, bytes]], +) -> tuple[grpc.Server, ClientAppIoServicer]: """Run ClientAppIo API gRPC server.""" clientappio_servicer: grpc.Server = ClientAppIoServicer() clientappio_add_servicer_to_server_fn = add_ClientAppIoServicer_to_server @@ -791,6 +829,7 @@ def run_clientappio_api_grpc(address: str) -> tuple[grpc.Server, ClientAppIoServ ), server_address=address, max_message_length=GRPC_MAX_MESSAGE_LENGTH, + certificates=certificates, ) log(INFO, "Starting Flower ClientAppIo gRPC server on %s", address) clientappio_grpc_server.start() diff --git a/src/py/flwr/client/clientapp/app.py b/src/py/flwr/client/clientapp/app.py index 52be2a4b6dc1..66cc4d8277ee 100644 --- a/src/py/flwr/client/clientapp/app.py +++ b/src/py/flwr/client/clientapp/app.py @@ -15,6 +15,7 @@ 
"""Flower ClientApp process.""" import argparse +import sys import time from logging import DEBUG, ERROR, INFO from typing import Optional @@ -24,7 +25,9 @@ from flwr.cli.install import install_from_fab from flwr.client.client_app import ClientApp, LoadClientAppError from flwr.common import Context, Message -from flwr.common.constant import ErrorCode +from flwr.common.args import add_args_flwr_app_common +from flwr.common.config import get_flwr_dir +from flwr.common.constant import CLIENTAPPIO_API_DEFAULT_CLIENT_ADDRESS, ErrorCode from flwr.common.grpc import create_channel from flwr.common.logger import log from flwr.common.message import Error @@ -54,32 +57,30 @@ def flwr_clientapp() -> None: """Run process-isolated Flower ClientApp.""" - log(INFO, "Starting Flower ClientApp") - - parser = argparse.ArgumentParser( - description="Run a Flower ClientApp", - ) - parser.add_argument( - "--supernode", - type=str, - help="Address of SuperNode ClientAppIo gRPC servicer", - ) - parser.add_argument( - "--token", - type=int, - required=False, - help="Unique token generated by SuperNode for each ClientApp execution", - ) - args = parser.parse_args() + args = _parse_args_run_flwr_clientapp().parse_args() + if not args.insecure: + log( + ERROR, + "flwr-clientapp does not support TLS yet. 
" + "Please use the '--insecure' flag.", + ) + sys.exit(1) + log(INFO, "Starting Flower ClientApp") log( DEBUG, - "Staring isolated `ClientApp` connected to SuperNode ClientAppIo at %s " + "Starting isolated `ClientApp` connected to SuperNode's ClientAppIo API at %s " "with token %s", - args.supernode, + args.clientappio_api_address, args.token, ) - run_clientapp(supernode=args.supernode, token=args.token) + run_clientapp( + clientappio_api_address=args.clientappio_api_address, + run_once=(args.token is not None), + token=args.token, + flwr_dir=args.flwr_dir, + certificates=None, + ) def on_channel_state_change(channel_connectivity: str) -> None: @@ -88,28 +89,26 @@ def on_channel_state_change(channel_connectivity: str) -> None: def run_clientapp( # pylint: disable=R0914 - supernode: str, + clientappio_api_address: str, + run_once: bool, token: Optional[int] = None, + flwr_dir: Optional[str] = None, + certificates: Optional[bytes] = None, ) -> None: - """Run Flower ClientApp process. - - Parameters - ---------- - supernode : str - Address of SuperNode - token : Optional[int] (default: None) - Unique SuperNode token for ClientApp-SuperNode authentication - """ + """Run Flower ClientApp process.""" channel = create_channel( - server_address=supernode, - insecure=True, + server_address=clientappio_api_address, + insecure=(certificates is None), + root_certificates=certificates, ) channel.subscribe(on_channel_state_change) + # Resolve directory where FABs are installed + flwr_dir_ = get_flwr_dir(flwr_dir) + try: stub = ClientAppIoStub(channel) - only_once = token is not None while True: # If token is not set, loop until token is received from SuperNode while token is None: @@ -122,13 +121,13 @@ def run_clientapp( # pylint: disable=R0914 # Install FAB, if provided if fab: log(DEBUG, "Flower ClientApp starts FAB installation.") - install_from_fab(fab.content, flwr_dir=None, skip_prompt=True) + install_from_fab(fab.content, flwr_dir=flwr_dir_, skip_prompt=True) 
load_client_app_fn = get_load_client_app_fn( default_app_ref="", app_path=None, multi_app=True, - flwr_dir=None, + flwr_dir=str(flwr_dir_), ) try: @@ -170,7 +169,7 @@ def run_clientapp( # pylint: disable=R0914 # Stop the loop if `flwr-clientapp` is expected to process only a single # message - if only_once: + if run_once: break except KeyboardInterrupt: @@ -233,3 +232,25 @@ def push_message( except grpc.RpcError as e: log(ERROR, "[PushClientAppOutputs] gRPC error occurred: %s", str(e)) raise e + + +def _parse_args_run_flwr_clientapp() -> argparse.ArgumentParser: + """Parse flwr-clientapp command line arguments.""" + parser = argparse.ArgumentParser( + description="Run a Flower ClientApp", + ) + parser.add_argument( + "--clientappio-api-address", + default=CLIENTAPPIO_API_DEFAULT_CLIENT_ADDRESS, + type=str, + help="Address of SuperNode's ClientAppIo API (IPv4, IPv6, or a domain name)." + f"By default, it is set to {CLIENTAPPIO_API_DEFAULT_CLIENT_ADDRESS}.", + ) + parser.add_argument( + "--token", + type=int, + required=False, + help="Unique token generated by SuperNode for each ClientApp execution", + ) + add_args_flwr_app_common(parser=parser) + return parser diff --git a/src/py/flwr/client/clientapp/clientappio_servicer_test.py b/src/py/flwr/client/clientapp/clientappio_servicer_test.py index a03400c12a86..b32bd2133d74 100644 --- a/src/py/flwr/client/clientapp/clientappio_servicer_test.py +++ b/src/py/flwr/client/clientapp/clientappio_servicer_test.py @@ -36,7 +36,7 @@ ) from flwr.proto.message_pb2 import Context as ProtoContext from flwr.proto.run_pb2 import Run as ProtoRun -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .clientappio_servicer import ClientAppInputs, ClientAppIoServicer, ClientAppOutputs @@ -66,6 +66,7 @@ def test_set_inputs(self) -> None: content=self.maker.recordset(2, 2, 1), ) context = Context( + run_id=1, node_id=1, 
node_config={"nodeconfig1": 4.2}, state=self.maker.recordset(2, 2, 1), @@ -77,6 +78,11 @@ def test_set_inputs(self) -> None: fab_version="ipsum", fab_hash="dolor", override_config=self.maker.user_config(), + pending_at="2021-01-01T00:00:00Z", + starting_at="", + running_at="", + finished_at="", + status=typing.RunStatus(status="pending", sub_status="", details=""), ) fab = typing.Fab( hash_str="abc123#$%", @@ -122,6 +128,7 @@ def test_get_outputs(self) -> None: content=self.maker.recordset(2, 2, 1), ) context = Context( + run_id=1, node_id=1, node_config={"nodeconfig1": 4.2}, state=self.maker.recordset(2, 2, 1), @@ -186,6 +193,7 @@ def test_push_clientapp_outputs(self) -> None: content=self.maker.recordset(2, 2, 1), ) context = Context( + run_id=1, node_id=1, node_config={"nodeconfig1": 4.2}, state=self.maker.recordset(2, 2, 1), diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index bfc20eee896a..6d6d24d54ea3 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -41,11 +41,7 @@ from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker -from flwr.common.serde import ( - message_from_taskins, - message_to_taskres, - user_config_from_proto, -) +from flwr.common.serde import message_from_taskins, message_to_taskres, run_from_proto from flwr.common.typing import Fab, Run from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 @@ -159,6 +155,11 @@ def grpc_request_response( # pylint: disable=R0913,R0914,R0915,R0917 ping_thread: Optional[threading.Thread] = None ping_stop_event = threading.Event() + # Restrict retries to cases where the status code is UNAVAILABLE + retry_invoker.should_giveup = ( + lambda e: e.code() != grpc.StatusCode.UNAVAILABLE # type: ignore + ) + 
########################################################################### # ping/create_node/delete_node/receive/send/get_run functions ########################################################################### @@ -287,13 +288,7 @@ def get_run(run_id: int) -> Run: ) # Return fab_id and fab_version - return Run( - run_id, - get_run_response.run.fab_id, - get_run_response.run.fab_version, - get_run_response.run.fab_hash, - user_config_from_proto(get_run_response.run.override_config), - ) + return run_from_proto(get_run_response.run) def get_fab(fab_hash: str) -> Fab: # Call FleetAPI diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index 311f8c37e1b1..0be5ab30e026 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -142,7 +142,9 @@ def test_client_without_get_properties() -> None: actual_msg = handle_legacy_message_from_msgtype( client_fn=_get_client_fn(client), message=message, - context=Context(node_id=1123, node_config={}, state=RecordSet(), run_config={}), + context=Context( + run_id=2234, node_id=1123, node_config={}, state=RecordSet(), run_config={} + ), ) # Assert @@ -206,7 +208,9 @@ def test_client_with_get_properties() -> None: actual_msg = handle_legacy_message_from_msgtype( client_fn=_get_client_fn(client), message=message, - context=Context(node_id=1123, node_config={}, state=RecordSet(), run_config={}), + context=Context( + run_id=2234, node_id=1123, node_config={}, state=RecordSet(), run_config={} + ), ) # Assert diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index e68bf5177797..89729bca1b9c 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -74,6 +74,7 @@ def func(configs: dict[str, 
ConfigsRecordValues]) -> ConfigsRecord: def _make_ctxt() -> Context: cfg = ConfigsRecord(SecAggPlusState().to_dict()) return Context( + run_id=234, node_id=123, node_config={}, state=RecordSet(configs_records={RECORD_KEY_STATE: cfg}), diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py index e75fb5530b2c..248ee5bdae81 100644 --- a/src/py/flwr/client/mod/utils_test.py +++ b/src/py/flwr/client/mod/utils_test.py @@ -104,7 +104,9 @@ def test_multiple_mods(self) -> None: state = RecordSet() state.metrics_records[METRIC] = MetricsRecord({COUNTER: 0.0}) - context = Context(node_id=0, node_config={}, state=state, run_config={}) + context = Context( + run_id=1, node_id=0, node_config={}, state=state, run_config={} + ) message = _get_dummy_flower_message() # Execute @@ -129,7 +131,9 @@ def test_filter(self) -> None: # Prepare footprint: list[str] = [] mock_app = make_mock_app("app", footprint) - context = Context(node_id=0, node_config={}, state=RecordSet(), run_config={}) + context = Context( + run_id=1, node_id=0, node_config={}, state=RecordSet(), run_config={} + ) message = _get_dummy_flower_message() def filter_mod( diff --git a/src/py/flwr/client/node_state_tests.py b/src/py/flwr/client/node_state_test.py similarity index 78% rename from src/py/flwr/client/node_state_tests.py rename to src/py/flwr/client/node_state_test.py index 26ac4fea6855..ff6416a02d27 100644 --- a/src/py/flwr/client/node_state_tests.py +++ b/src/py/flwr/client/node_state_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,7 +17,7 @@ from typing import cast -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import ConfigsRecord, Context from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 @@ -34,32 +34,31 @@ def _run_dummy_task(context: Context) -> Context: def test_multirun_in_node_state() -> None: - """Test basic NodeState logic.""" + """Test basic DeprecatedRunInfoStore logic.""" # Tasks to perform tasks = [TaskIns(run_id=run_id) for run_id in [0, 1, 1, 2, 3, 2, 1, 5]] # the "tasks" is to count how many times each run is executed expected_values = {0: "1", 1: "1" * 3, 2: "1" * 2, 3: "1", 5: "1"} - # NodeState - node_state = NodeState(node_id=0, node_config={}) + node_info_store = DeprecatedRunInfoStore(node_id=0, node_config={}) for task in tasks: run_id = task.run_id # Register - node_state.register_context(run_id=run_id) + node_info_store.register_context(run_id=run_id) # Get run state - context = node_state.retrieve_context(run_id=run_id) + context = node_info_store.retrieve_context(run_id=run_id) # Run "task" updated_state = _run_dummy_task(context) # Update run state - node_state.update_context(run_id=run_id, context=updated_state) + node_info_store.update_context(run_id=run_id, context=updated_state) # Verify values - for run_id, run_info in node_state.run_infos.items(): + for run_id, run_info in node_info_store.run_infos.items(): assert ( run_info.context.state.configs_records["counter"]["count"] == expected_values[run_id] diff --git a/src/py/flwr/client/nodestate/__init__.py b/src/py/flwr/client/nodestate/__init__.py new file mode 100644 index 000000000000..a207e1d81948 --- /dev/null +++ b/src/py/flwr/client/nodestate/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower NodeState.""" + +from .in_memory_nodestate import InMemoryNodeState as InMemoryNodeState +from .nodestate import NodeState as NodeState +from .nodestate_factory import NodeStateFactory as NodeStateFactory + +__all__ = [ + "InMemoryNodeState", + "NodeState", + "NodeStateFactory", +] diff --git a/src/py/flwr/client/nodestate/in_memory_nodestate.py b/src/py/flwr/client/nodestate/in_memory_nodestate.py new file mode 100644 index 000000000000..fd88f3af28c7 --- /dev/null +++ b/src/py/flwr/client/nodestate/in_memory_nodestate.py @@ -0,0 +1,38 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""In-memory NodeState implementation.""" + + +from typing import Optional + +from flwr.client.nodestate.nodestate import NodeState + + +class InMemoryNodeState(NodeState): + """In-memory NodeState implementation.""" + + def __init__(self) -> None: + # Store node_id + self.node_id: Optional[int] = None + + def set_node_id(self, node_id: Optional[int]) -> None: + """Set the node ID.""" + self.node_id = node_id + + def get_node_id(self) -> int: + """Get the node ID.""" + if self.node_id is None: + raise ValueError("Node ID not set") + return self.node_id diff --git a/src/py/flwr/client/nodestate/nodestate.py b/src/py/flwr/client/nodestate/nodestate.py new file mode 100644 index 000000000000..6ae30f49fcc1 --- /dev/null +++ b/src/py/flwr/client/nodestate/nodestate.py @@ -0,0 +1,30 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Abstract base class NodeState.""" + +import abc +from typing import Optional + + +class NodeState(abc.ABC): + """Abstract NodeState.""" + + @abc.abstractmethod + def set_node_id(self, node_id: Optional[int]) -> None: + """Set the node ID.""" + + @abc.abstractmethod + def get_node_id(self) -> int: + """Get the node ID.""" diff --git a/src/py/flwr/client/nodestate/nodestate_factory.py b/src/py/flwr/client/nodestate/nodestate_factory.py new file mode 100644 index 000000000000..3d52f0272bd4 --- /dev/null +++ b/src/py/flwr/client/nodestate/nodestate_factory.py @@ -0,0 +1,37 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Factory class that creates NodeState instances.""" + +import threading +from typing import Optional + +from .in_memory_nodestate import InMemoryNodeState +from .nodestate import NodeState + + +class NodeStateFactory: + """Factory class that creates NodeState instances.""" + + def __init__(self) -> None: + self.state_instance: Optional[NodeState] = None + self.lock = threading.RLock() + + def state(self) -> NodeState: + """Return a State instance and create it, if necessary.""" + # Lock access to NodeStateFactory to prevent returning different instances + with self.lock: + if self.state_instance is None: + self.state_instance = InMemoryNodeState() + return self.state_instance diff --git a/src/py/flwr/client/nodestate/nodestate_test.py b/src/py/flwr/client/nodestate/nodestate_test.py new file mode 100644 index 000000000000..f7088b1f8ac6 --- /dev/null +++ b/src/py/flwr/client/nodestate/nodestate_test.py @@ -0,0 +1,69 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests all NodeState implementations have to conform to.""" + +import unittest +from abc import abstractmethod + +from flwr.client.nodestate import InMemoryNodeState, NodeState + + +class StateTest(unittest.TestCase): + """Test all state implementations.""" + + # This is to True in each child class + __test__ = False + + @abstractmethod + def state_factory(self) -> NodeState: + """Provide state implementation to test.""" + raise NotImplementedError() + + def test_get_set_node_id(self) -> None: + """Test set_node_id.""" + # Prepare + state: NodeState = self.state_factory() + node_id = 123 + + # Execute + state.set_node_id(node_id) + + retrieved_node_id = state.get_node_id() + + # Assert + assert node_id == retrieved_node_id + + def test_get_node_id_fails(self) -> None: + """Test get_node_id fails correctly if node_id is not set.""" + # Prepare + state: NodeState = self.state_factory() + + # Execute and assert + with self.assertRaises(ValueError): + state.get_node_id() + + +class InMemoryStateTest(StateTest): + """Test InMemoryState implementation.""" + + __test__ = True + + def state_factory(self) -> NodeState: + """Return InMemoryState.""" + return InMemoryNodeState() + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index f933ae44ad06..7f6c247d8907 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -41,11 +41,7 @@ from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker -from flwr.common.serde import ( - message_from_taskins, - message_to_taskres, - user_config_from_proto, -) +from flwr.common.serde import message_from_taskins, message_to_taskres, run_from_proto from flwr.common.typing import Fab, Run from flwr.proto.fab_pb2 
import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 @@ -361,15 +357,9 @@ def get_run(run_id: int) -> Run: # Send the request res = _request(req, GetRunResponse, PATH_GET_RUN) if res is None: - return Run(run_id, "", "", "", {}) - - return Run( - run_id, - res.run.fab_id, - res.run.fab_version, - res.run.fab_hash, - user_config_from_proto(res.run.override_config), - ) + return Run.create_empty(run_id) + + return run_from_proto(res.run) def get_fab(fab_hash: str) -> Fab: # Construct the request diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/run_info_store.py similarity index 96% rename from src/py/flwr/client/node_state.py rename to src/py/flwr/client/run_info_store.py index 843c9890c5d2..a5cd5129bc3a 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/run_info_store.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Node state.""" +"""Deprecated Run Info Store.""" from dataclasses import dataclass @@ -36,7 +36,7 @@ class RunInfo: initial_run_config: UserConfig -class NodeState: +class DeprecatedRunInfoStore: """State of a node where client nodes execute runs.""" def __init__( @@ -83,6 +83,7 @@ def register_context( self.run_infos[run_id] = RunInfo( initial_run_config=initial_run_config, context=Context( + run_id=run_id, node_id=self.node_id, node_config=self.node_config, state=RecordSet(), diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py index 4ddfe5d40aa3..e5b9f22ab122 100644 --- a/src/py/flwr/client/supernode/app.py +++ b/src/py/flwr/client/supernode/app.py @@ -28,9 +28,13 @@ ) from flwr.common import EventType, event +from flwr.common.args import try_obtain_root_certificates from flwr.common.config import parse_config_args from flwr.common.constant import ( + CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS, FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, @@ -38,11 +42,7 @@ from flwr.common.exit_handlers import register_exit_handlers from flwr.common.logger import log, warn_deprecated_feature -from ..app import ( - ISOLATION_MODE_PROCESS, - ISOLATION_MODE_SUBPROCESS, - start_client_internal, -) +from ..app import start_client_internal from ..clientapp.utils import get_load_client_app_fn @@ -63,10 +63,21 @@ def run_supernode() -> None: "Ignoring `--flwr-dir`.", ) - root_certificates = _get_certificates(args) + # Exit if unsupported argument is passed by the user + if args.app is not None: + log( + ERROR, + "The `app` argument is deprecated. The SuperNode now automatically " + "uses the ClientApp delivered from the SuperLink. Providing the app " + "directory manually is no longer supported. 
Please remove the `app` " + "argument from your command.", + ) + sys.exit(1) + + root_certificates = try_obtain_root_certificates(args, args.superlink) load_fn = get_load_client_app_fn( default_app_ref="", - app_path=args.app, + app_path=None, flwr_dir=args.flwr_dir, multi_app=True, ) @@ -88,7 +99,7 @@ def run_supernode() -> None: ), flwr_path=args.flwr_dir, isolation=args.isolation, - supernode_address=args.supernode_address, + clientappio_api_address=args.clientappio_api_address, ) # Graceful shutdown @@ -128,41 +139,6 @@ def _warn_deprecated_server_arg(args: argparse.Namespace) -> None: args.superlink = args.server -def _get_certificates(args: argparse.Namespace) -> Optional[bytes]: - """Load certificates if specified in args.""" - # Obtain certificates - if args.insecure: - if args.root_certificates is not None: - sys.exit( - "Conflicting options: The '--insecure' flag disables HTTPS, " - "but '--root-certificates' was also specified. Please remove " - "the '--root-certificates' option when running in insecure mode, " - "or omit '--insecure' to use HTTPS." - ) - log( - WARN, - "Option `--insecure` was set. " - "Starting insecure HTTP client connected to %s.", - args.superlink, - ) - root_certificates = None - else: - # Load the certificates if provided, or load the system certificates - cert_path = args.root_certificates - if cert_path is None: - root_certificates = None - else: - root_certificates = Path(cert_path).read_bytes() - log( - DEBUG, - "Starting secure HTTPS client connected to %s " - "with the following certificates: %s.", - args.superlink, - cert_path, - ) - return root_certificates - - def _parse_args_run_supernode() -> argparse.ArgumentParser: """Parse flower-supernode command line arguments.""" parser = argparse.ArgumentParser( @@ -173,12 +149,12 @@ def _parse_args_run_supernode() -> argparse.ArgumentParser: "app", nargs="?", default=None, - help="Specify the path of the Flower App to load and run the `ClientApp`. 
" - "The `pyproject.toml` file must be located in the root of this path. " - "When this argument is provided, the SuperNode will exclusively respond to " - "messages from the corresponding `ServerApp` by matching the FAB ID and FAB " - "version. An error will be raised if a message is received from any other " - "`ServerApp`.", + help=( + "(REMOVED) This argument is removed. The SuperNode now automatically " + "uses the ClientApp delivered from the SuperLink, so there is no need to " + "provide the app directory manually. This argument will be removed in a " + "future version." + ), ) _parse_args_common(parser) parser.add_argument( @@ -194,22 +170,22 @@ def _parse_args_run_supernode() -> argparse.ArgumentParser: ) parser.add_argument( "--isolation", - default=None, + default=ISOLATION_MODE_SUBPROCESS, required=False, choices=[ ISOLATION_MODE_SUBPROCESS, ISOLATION_MODE_PROCESS, ], - help="Isolation mode when running `ClientApp` (optional, possible values: " - "`subprocess`, `process`). By default, `ClientApp` runs in the same process " - "that executes the SuperNode. Use `subprocess` to configure SuperNode to run " - "`ClientApp` in a subprocess. Use `process` to indicate that a separate " - "independent process gets created outside of SuperNode.", + help="Isolation mode when running a `ClientApp` (`subprocess` by default, " + "possible values: `subprocess`, `process`). Use `subprocess` to configure " + "SuperNode to run a `ClientApp` in a subprocess. Use `process` to indicate " + "that a separate independent process gets created outside of SuperNode.", ) parser.add_argument( - "--supernode-address", - default="0.0.0.0:9094", - help="Set the SuperNode gRPC server address. Defaults to `0.0.0.0:9094`.", + "--clientappio-api-address", + default=CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS, + help="ClientAppIo API (gRPC) server address (IPv4, IPv6, or a domain name). 
" + f"By default, it is set to {CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS}.", ) return parser diff --git a/src/py/flwr/common/args.py b/src/py/flwr/common/args.py new file mode 100644 index 000000000000..3c27f3840632 --- /dev/null +++ b/src/py/flwr/common/args.py @@ -0,0 +1,152 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common Flower arguments.""" + +import argparse +import sys +from logging import DEBUG, ERROR, WARN +from os.path import isfile +from pathlib import Path +from typing import Optional + +from flwr.common.constant import ( + TRANSPORT_TYPE_GRPC_ADAPTER, + TRANSPORT_TYPE_GRPC_RERE, + TRANSPORT_TYPE_REST, +) +from flwr.common.logger import log + + +def add_args_flwr_app_common(parser: argparse.ArgumentParser) -> None: + """Add common Flower arguments for flwr-*app to the provided parser.""" + parser.add_argument( + "--flwr-dir", + default=None, + help="""The path containing installed Flower Apps. + By default, this value is equal to: + + - `$FLWR_HOME/` if `$FLWR_HOME` is defined + - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined + - `$HOME/.flwr/` in all other cases + """, + ) + parser.add_argument( + "--insecure", + action="store_true", + help="Run the server without HTTPS, regardless of whether certificate " + "paths are provided. By default, the server runs with HTTPS enabled. 
" + "Use this flag only if you understand the risks.", + ) + + +def try_obtain_root_certificates( + args: argparse.Namespace, + grpc_server_address: str, +) -> Optional[bytes]: + """Validate and return the root certificates.""" + root_cert_path = args.root_certificates + if args.insecure: + if root_cert_path is not None: + sys.exit( + "Conflicting options: The '--insecure' flag disables HTTPS, " + "but '--root-certificates' was also specified. Please remove " + "the '--root-certificates' option when running in insecure mode, " + "or omit '--insecure' to use HTTPS." + ) + log( + WARN, + "Option `--insecure` was set. Starting insecure HTTP channel to %s.", + grpc_server_address, + ) + root_certificates = None + else: + # Load the certificates if provided, or load the system certificates + if root_cert_path is None: + log( + WARN, + "Both `--insecure` and `--root-certificates` were not set. " + "Using system certificates.", + ) + root_certificates = None + elif not isfile(root_cert_path): + log(ERROR, "Path argument `--root-certificates` does not point to a file.") + sys.exit(1) + else: + root_certificates = Path(root_cert_path).read_bytes() + log( + DEBUG, + "Starting secure HTTPS channel to %s " + "with the following certificates: %s.", + grpc_server_address, + root_cert_path, + ) + return root_certificates + + +def try_obtain_server_certificates( + args: argparse.Namespace, + transport_type: str, +) -> Optional[tuple[bytes, bytes, bytes]]: + """Validate and return the CA cert, server cert, and server private key.""" + if args.insecure: + log(WARN, "Option `--insecure` was set. 
Starting insecure HTTP server.") + return None + # Check if certificates are provided + if transport_type in [TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_GRPC_ADAPTER]: + if args.ssl_certfile and args.ssl_keyfile and args.ssl_ca_certfile: + if not isfile(args.ssl_ca_certfile): + sys.exit("Path argument `--ssl-ca-certfile` does not point to a file.") + if not isfile(args.ssl_certfile): + sys.exit("Path argument `--ssl-certfile` does not point to a file.") + if not isfile(args.ssl_keyfile): + sys.exit("Path argument `--ssl-keyfile` does not point to a file.") + certificates = ( + Path(args.ssl_ca_certfile).read_bytes(), # CA certificate + Path(args.ssl_certfile).read_bytes(), # server certificate + Path(args.ssl_keyfile).read_bytes(), # server private key + ) + return certificates + if args.ssl_certfile or args.ssl_keyfile or args.ssl_ca_certfile: + sys.exit( + "You need to provide valid file paths to `--ssl-certfile`, " + "`--ssl-keyfile`, and `—-ssl-ca-certfile` to create a secure " + "connection in Fleet API server (gRPC-rere)." + ) + if transport_type == TRANSPORT_TYPE_REST: + if args.ssl_certfile and args.ssl_keyfile: + if not isfile(args.ssl_certfile): + sys.exit("Path argument `--ssl-certfile` does not point to a file.") + if not isfile(args.ssl_keyfile): + sys.exit("Path argument `--ssl-keyfile` does not point to a file.") + certificates = ( + b"", + Path(args.ssl_certfile).read_bytes(), # server certificate + Path(args.ssl_keyfile).read_bytes(), # server private key + ) + return certificates + if args.ssl_certfile or args.ssl_keyfile: + sys.exit( + "You need to provide valid file paths to `--ssl-certfile` " + "and `--ssl-keyfile` to create a secure connection " + "in Fleet API server (REST, experimental)." + ) + log( + ERROR, + "Certificates are required unless running in insecure mode. 
" + "Please provide certificate paths to `--ssl-certfile`, " + "`--ssl-keyfile`, and `—-ssl-ca-certfile` or run the server " + "in insecure mode using '--insecure' if you understand the risks.", + ) + sys.exit(1) diff --git a/src/py/flwr/common/config.py b/src/py/flwr/common/config.py index 24ccada7509a..e7f71a40951c 100644 --- a/src/py/flwr/common/config.py +++ b/src/py/flwr/common/config.py @@ -22,6 +22,7 @@ import tomli from flwr.cli.config_utils import get_fab_config, validate_fields +from flwr.common import ConfigsRecord from flwr.common.constant import ( APP_DIR, FAB_CONFIG_FILE, @@ -229,3 +230,12 @@ def get_metadata_from_config(config: dict[str, Any]) -> tuple[str, str]: config["project"]["version"], f"{config['tool']['flwr']['app']['publisher']}/{config['project']['name']}", ) + + +def user_config_to_configsrecord(config: UserConfig) -> ConfigsRecord: + """Construct a `ConfigsRecord` out of a `UserConfig`.""" + c_record = ConfigsRecord() + for k, v in config.items(): + c_record[k] = v + + return c_record diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index e99e0edaacd4..ec84aa984fde 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -38,17 +38,30 @@ ] # Addresses +# Ports +CLIENTAPPIO_PORT = "9094" +SERVERAPPIO_PORT = "9091" +FLEETAPI_GRPC_RERE_PORT = "9092" +FLEETAPI_PORT = "9095" +EXEC_API_PORT = "9093" +SIMULATIONIO_PORT = "9096" +# Octets +SERVER_OCTET = "0.0.0.0" +CLIENT_OCTET = "127.0.0.1" # SuperNode -CLIENTAPPIO_API_DEFAULT_ADDRESS = "0.0.0.0:9094" -# SuperExec -EXEC_API_DEFAULT_ADDRESS = "0.0.0.0:9093" +CLIENTAPPIO_API_DEFAULT_SERVER_ADDRESS = f"{SERVER_OCTET}:{CLIENTAPPIO_PORT}" +CLIENTAPPIO_API_DEFAULT_CLIENT_ADDRESS = f"{CLIENT_OCTET}:{CLIENTAPPIO_PORT}" # SuperLink -DRIVER_API_DEFAULT_ADDRESS = "0.0.0.0:9091" -FLEET_API_GRPC_RERE_DEFAULT_ADDRESS = "0.0.0.0:9092" +SERVERAPPIO_API_DEFAULT_SERVER_ADDRESS = f"{SERVER_OCTET}:{SERVERAPPIO_PORT}" +SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS = 
f"{CLIENT_OCTET}:{SERVERAPPIO_PORT}" +FLEET_API_GRPC_RERE_DEFAULT_ADDRESS = f"{SERVER_OCTET}:{FLEETAPI_GRPC_RERE_PORT}" FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS = ( "[::]:8080" # IPv6 to keep start_server compatible ) -FLEET_API_REST_DEFAULT_ADDRESS = "0.0.0.0:9093" +FLEET_API_REST_DEFAULT_ADDRESS = f"{SERVER_OCTET}:{FLEETAPI_PORT}" +EXEC_API_DEFAULT_SERVER_ADDRESS = f"{SERVER_OCTET}:{EXEC_API_PORT}" +SIMULATIONIO_API_DEFAULT_SERVER_ADDRESS = f"{SERVER_OCTET}:{SIMULATIONIO_PORT}" +SIMULATIONIO_API_DEFAULT_CLIENT_ADDRESS = f"{CLIENT_OCTET}:{SIMULATIONIO_PORT}" # Constants for ping PING_DEFAULT_INTERVAL = 30 @@ -84,6 +97,19 @@ # Message TTL MESSAGE_TTL_TOLERANCE = 1e-1 +# Isolation modes +ISOLATION_MODE_SUBPROCESS = "subprocess" +ISOLATION_MODE_PROCESS = "process" + +# Log streaming configurations +CONN_REFRESH_PERIOD = 60 # Stream connection refresh period +CONN_RECONNECT_INTERVAL = 0.5 # Reconnect interval between two stream connections +LOG_STREAM_INTERVAL = 0.5 # Log stream interval for `ExecServicer.StreamLogs` +LOG_UPLOAD_INTERVAL = 0.2 # Minimum interval between two log uploads + +# Retry configurations +MAX_RETRY_DELAY = 20 # Maximum delay duration between two consecutive retries. 
+ class MessageType: """Message type.""" @@ -124,8 +150,34 @@ class ErrorCode: UNKNOWN = 0 LOAD_CLIENT_APP_EXCEPTION = 1 CLIENT_APP_RAISED_EXCEPTION = 2 - NODE_UNAVAILABLE = 3 + MESSAGE_UNAVAILABLE = 3 + REPLY_MESSAGE_UNAVAILABLE = 4 def __new__(cls) -> ErrorCode: """Prevent instantiation.""" raise TypeError(f"{cls.__name__} cannot be instantiated.") + + +class Status: + """Run status.""" + + PENDING = "pending" + STARTING = "starting" + RUNNING = "running" + FINISHED = "finished" + + def __new__(cls) -> Status: + """Prevent instantiation.""" + raise TypeError(f"{cls.__name__} cannot be instantiated.") + + +class SubStatus: + """Run sub-status.""" + + COMPLETED = "completed" + FAILED = "failed" + STOPPED = "stopped" + + def __new__(cls) -> SubStatus: + """Prevent instantiation.""" + raise TypeError(f"{cls.__name__} cannot be instantiated.") diff --git a/src/py/flwr/common/context.py b/src/py/flwr/common/context.py index 1544b96d3fa3..edf2024c2b1c 100644 --- a/src/py/flwr/common/context.py +++ b/src/py/flwr/common/context.py @@ -27,36 +27,41 @@ class Context: Parameters ---------- + run_id : int + The ID that identifies the run. node_id : int The ID that identifies the node. node_config : UserConfig A config (key/value mapping) unique to the node and independent of the `run_config`. This config persists across all runs this node participates in. state : RecordSet - Holds records added by the entity in a given run and that will stay local. + Holds records added by the entity in a given `run_id` and that will stay local. This means that the data it holds will never leave the system it's running from. This can be used as an intermediate storage or scratchpad when executing mods. It can also be used as a memory to access at different points during the lifecycle of this entity (e.g. across multiple rounds) run_config : UserConfig - A config (key/value mapping) held by the entity in a given run and that will - stay local. 
It can be used at any point during the lifecycle of this entity + A config (key/value mapping) held by the entity in a given `run_id` and that + will stay local. It can be used at any point during the lifecycle of this entity (e.g. across multiple rounds) """ + run_id: int node_id: int node_config: UserConfig state: RecordSet run_config: UserConfig - def __init__( # pylint: disable=too-many-arguments + def __init__( # pylint: disable=too-many-arguments, too-many-positional-arguments self, + run_id: int, node_id: int, node_config: UserConfig, state: RecordSet, run_config: UserConfig, ) -> None: + self.run_id = run_id self.node_id = node_id self.node_config = node_config self.state = state diff --git a/src/py/flwr/common/date.py b/src/py/flwr/common/date.py index 7f30f5e0591a..28afe3d1ff38 100644 --- a/src/py/flwr/common/date.py +++ b/src/py/flwr/common/date.py @@ -15,9 +15,27 @@ """Flower date utils.""" -from datetime import datetime, timezone +import datetime -def now() -> datetime: +def now() -> datetime.datetime: """Construct a datetime from time.time() with time zone set to UTC.""" - return datetime.now(tz=timezone.utc) + return datetime.datetime.now(tz=datetime.timezone.utc) + + +def format_timedelta(td: datetime.timedelta) -> str: + """Format a timedelta as a string.""" + days = td.days + hours, remainder = divmod(td.seconds, 3600) + minutes, seconds = divmod(remainder, 60) + + if days > 0: + return f"{days}d {hours:02}:{minutes:02}:{seconds:02}" + return f"{hours:02}:{minutes:02}:{seconds:02}" + + +def isoformat8601_utc(dt: datetime.datetime) -> str: + """Return the datetime formatted as an ISO 8601 string with a trailing 'Z'.""" + if dt.tzinfo != datetime.timezone.utc: + raise ValueError("Expected datetime with timezone set to UTC") + return dt.isoformat(timespec="seconds").replace("+00:00", "Z") diff --git a/src/py/flwr/common/grpc.py b/src/py/flwr/common/grpc.py index 5a29c595119c..0c6e2fc5b082 100644 --- a/src/py/flwr/common/grpc.py +++ 
b/src/py/flwr/common/grpc.py @@ -53,7 +53,10 @@ def create_channel( channel = grpc.insecure_channel(server_address, options=channel_options) log(DEBUG, "Opened insecure gRPC connection (no certificates were passed)") else: - ssl_channel_credentials = grpc.ssl_channel_credentials(root_certificates) + try: + ssl_channel_credentials = grpc.ssl_channel_credentials(root_certificates) + except Exception as e: + raise ValueError(f"Failed to create SSL channel credentials: {e}") from e channel = grpc.secure_channel( server_address, ssl_channel_credentials, options=channel_options ) diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 3a058abac9c6..7f132d706dc2 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -16,9 +16,22 @@ import logging +import sys +import threading +import time from logging import WARN, LogRecord from logging.handlers import HTTPHandler -from typing import TYPE_CHECKING, Any, Optional, TextIO +from queue import Empty, Queue +from typing import TYPE_CHECKING, Any, Optional, TextIO, Union + +import grpc + +from flwr.proto.log_pb2 import PushLogsRequest # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.serverappio_pb2_grpc import ServerAppIoStub # pylint: disable=E0611 +from flwr.proto.simulationio_pb2_grpc import SimulationIoStub # pylint: disable=E0611 + +from .constant import LOG_UPLOAD_INTERVAL # Create logger LOGGER_NAME = "flwr" @@ -259,3 +272,97 @@ def set_logger_propagation( if not child_logger.propagate: child_logger.log(logging.DEBUG, "Logger propagate set to False") return child_logger + + +def mirror_output_to_queue(log_queue: Queue[Optional[str]]) -> None: + """Mirror stdout and stderr output to the provided queue.""" + + def get_write_fn(stream: TextIO) -> Any: + original_write = stream.write + + def fn(s: str) -> int: + ret = original_write(s) + stream.flush() + log_queue.put(s) + return ret + + return fn + + sys.stdout.write = 
get_write_fn(sys.stdout) # type: ignore[method-assign] + sys.stderr.write = get_write_fn(sys.stderr) # type: ignore[method-assign] + console_handler.stream = sys.stdout + + +def restore_output() -> None: + """Restore stdout and stderr. + + This will stop mirroring output to queues. + """ + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + console_handler.stream = sys.stdout + + +def _log_uploader( + log_queue: Queue[Optional[str]], node_id: int, run_id: int, stub: ServerAppIoStub +) -> None: + """Upload logs to the SuperLink.""" + exit_flag = False + node = Node(node_id=node_id, anonymous=False) + msgs: list[str] = [] + while True: + # Fetch all messages from the queue + try: + while True: + msg = log_queue.get_nowait() + # Quit the loops if the returned message is `None` + # This is a signal that the run has finished + if msg is None: + exit_flag = True + break + msgs.append(msg) + except Empty: + pass + + # Upload if any logs + if msgs: + req = PushLogsRequest( + node=node, + run_id=run_id, + logs=msgs, + ) + try: + stub.PushLogs(req) + msgs.clear() + except grpc.RpcError as e: + # Ignore minor network errors + # pylint: disable-next=no-member + if e.code() != grpc.StatusCode.UNAVAILABLE: + raise e + + if exit_flag: + break + + time.sleep(LOG_UPLOAD_INTERVAL) + + +def start_log_uploader( + log_queue: Queue[Optional[str]], + node_id: int, + run_id: int, + stub: Union[ServerAppIoStub, SimulationIoStub], +) -> threading.Thread: + """Start the log uploader thread and return it.""" + thread = threading.Thread( + target=_log_uploader, args=(log_queue, node_id, run_id, stub) + ) + thread.start() + return thread + + +def stop_log_uploader( + log_queue: Queue[Optional[str]], log_uploader: threading.Thread +) -> None: + """Stop the log uploader thread.""" + log_queue.put(None) + log_uploader.join() diff --git a/src/py/flwr/common/logger_test.py b/src/py/flwr/common/logger_test.py new file mode 100644 index 000000000000..012353057e86 --- /dev/null +++ 
b/src/py/flwr/common/logger_test.py @@ -0,0 +1,57 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower Logger tests.""" + + +import sys +from queue import Queue +from typing import Optional + +from .logger import mirror_output_to_queue, restore_output + + +def test_mirror_output_to_queue() -> None: + """Test that stdout and stderr are mirrored to the provided queue.""" + # Prepare + log_queue: Queue[Optional[str]] = Queue() + + # Execute + mirror_output_to_queue(log_queue) + print("Test message") + sys.stderr.write("Error message\n") + + # Assert + assert not log_queue.empty() + assert log_queue.get() == "Test message" + assert log_queue.get() == "\n" + assert log_queue.get() == "Error message\n" + + +def test_restore_output() -> None: + """Test that stdout and stderr are restored after calling restore_output.""" + # Prepare + log_queue: Queue[Optional[str]] = Queue() + + # Execute + mirror_output_to_queue(log_queue) + print("Test message before restore") + restore_output() + print("Test message after restore") + sys.stderr.write("Error message after restore\n") + + # Assert + assert log_queue.get() == "Test message before restore" + assert log_queue.get() == "\n" + assert log_queue.empty() diff --git a/src/py/flwr/common/object_ref.py b/src/py/flwr/common/object_ref.py index 6259b5ab557d..91414ef210f8 100644 --- 
a/src/py/flwr/common/object_ref.py +++ b/src/py/flwr/common/object_ref.py @@ -55,8 +55,8 @@ def validate( specified attribute within it. project_dir : Optional[Union[str, Path]] (default: None) The directory containing the module. If None, the current working directory - is used. If `check_module` is True, the `project_dir` will be inserted into - the system path, and the previously inserted `project_dir` will be removed. + is used. If `check_module` is True, the `project_dir` will be temporarily + inserted into the system path and then removed after the validation is complete. Returns ------- @@ -66,8 +66,8 @@ def validate( Note ---- - This function will modify `sys.path` by inserting the provided `project_dir` - and removing the previously inserted `project_dir`. + This function will temporarily modify `sys.path` by inserting the provided + `project_dir`, which will be removed after the validation is complete. """ module_str, _, attributes_str = module_attribute_str.partition(":") if not module_str: @@ -82,11 +82,19 @@ def validate( ) if check_module: + if project_dir is None: + project_dir = Path.cwd() + project_dir = Path(project_dir).absolute() # Set the system path - _set_sys_path(project_dir) + sys.path.insert(0, str(project_dir)) # Load module module = find_spec(module_str) + + # Unset the system path + sys.path.remove(str(project_dir)) + + # Check if the module and the attribute exist if module and module.origin: if not _find_attribute_in_module(module.origin, attributes_str): return ( @@ -133,8 +141,10 @@ def load_app( # pylint: disable= too-many-branches Note ---- - This function will modify `sys.path` by inserting the provided `project_dir` - and removing the previously inserted `project_dir`. + - This function will unload all modules in the previously provided `project_dir`, + if it is invoked again. + - This function will modify `sys.path` by inserting the provided `project_dir` + and removing the previously inserted `project_dir`. 
""" valid, error_msg = validate(module_attribute_str, check_module=False) if not valid and error_msg: @@ -143,8 +153,19 @@ def load_app( # pylint: disable= too-many-branches module_str, _, attributes_str = module_attribute_str.partition(":") try: + # Initialize project path + if project_dir is None: + project_dir = Path.cwd() + project_dir = Path(project_dir).absolute() + + # Unload modules if the project directory has changed + if _current_sys_path and _current_sys_path != str(project_dir): + _unload_modules(Path(_current_sys_path)) + + # Set the system path _set_sys_path(project_dir) + # Import the module if module_str not in sys.modules: module = importlib.import_module(module_str) # Hack: `tabnet` does not work with `importlib.reload` @@ -160,15 +181,7 @@ def load_app( # pylint: disable= too-many-branches module = sys.modules[module_str] else: module = sys.modules[module_str] - - if project_dir is None: - project_dir = Path.cwd() - - # Reload cached modules in the project directory - for m in list(sys.modules.values()): - path: Optional[str] = getattr(m, "__file__", None) - if path is not None and path.startswith(str(project_dir)): - importlib.reload(m) + _reload_modules(project_dir) except ModuleNotFoundError as err: raise error_type( @@ -189,6 +202,24 @@ def load_app( # pylint: disable= too-many-branches return attribute +def _unload_modules(project_dir: Path) -> None: + """Unload modules from the project directory.""" + dir_str = str(project_dir.absolute()) + for name, m in list(sys.modules.items()): + path: Optional[str] = getattr(m, "__file__", None) + if path is not None and path.startswith(dir_str): + del sys.modules[name] + + +def _reload_modules(project_dir: Path) -> None: + """Reload modules from the project directory.""" + dir_str = str(project_dir.absolute()) + for m in list(sys.modules.values()): + path: Optional[str] = getattr(m, "__file__", None) + if path is not None and path.startswith(dir_str): + importlib.reload(m) + + def 
_set_sys_path(directory: Optional[Union[str, Path]]) -> None: """Set the system path.""" if directory is None: diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 54790992b40d..88d774b4d801 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -40,6 +40,7 @@ from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet from flwr.proto.recordset_pb2 import SintList, StringList, UintList from flwr.proto.run_pb2 import Run as ProtoRun +from flwr.proto.run_pb2 import RunStatus as ProtoRunStatus from flwr.proto.task_pb2 import Task, TaskIns, TaskRes from flwr.proto.transport_pb2 import ( ClientMessage, @@ -839,6 +840,7 @@ def message_from_proto(message_proto: ProtoMessage) -> Message: def context_to_proto(context: Context) -> ProtoContext: """Serialize `Context` to ProtoBuf.""" proto = ProtoContext( + run_id=context.run_id, node_id=context.node_id, node_config=user_config_to_proto(context.node_config), state=recordset_to_proto(context.state), @@ -850,6 +852,7 @@ def context_to_proto(context: Context) -> ProtoContext: def context_from_proto(context_proto: ProtoContext) -> Context: """Deserialize `Context` from ProtoBuf.""" context = Context( + run_id=context_proto.run_id, node_id=context_proto.node_id, node_config=user_config_from_proto(context_proto.node_config), state=recordset_from_proto(context_proto.state), @@ -869,6 +872,11 @@ def run_to_proto(run: typing.Run) -> ProtoRun: fab_version=run.fab_version, fab_hash=run.fab_hash, override_config=user_config_to_proto(run.override_config), + pending_at=run.pending_at, + starting_at=run.starting_at, + running_at=run.running_at, + finished_at=run.finished_at, + status=run_status_to_proto(run.status), ) return proto @@ -881,6 +889,11 @@ def run_from_proto(run_proto: ProtoRun) -> typing.Run: fab_version=run_proto.fab_version, fab_hash=run_proto.fab_hash, override_config=user_config_from_proto(run_proto.override_config), + pending_at=run_proto.pending_at, + 
starting_at=run_proto.starting_at, + running_at=run_proto.running_at, + finished_at=run_proto.finished_at, + status=run_status_from_proto(run_proto.status), ) return run @@ -910,3 +923,24 @@ def clientappstatus_from_proto( if msg.code == ClientAppOutputCode.UNKNOWN_ERROR: code = typing.ClientAppOutputCode.UNKNOWN_ERROR return typing.ClientAppOutputStatus(code=code, message=msg.message) + + +# === Run status === + + +def run_status_to_proto(run_status: typing.RunStatus) -> ProtoRunStatus: + """Serialize `RunStatus` to ProtoBuf.""" + return ProtoRunStatus( + status=run_status.status, + sub_status=run_status.sub_status, + details=run_status.details, + ) + + +def run_status_from_proto(run_status_proto: ProtoRunStatus) -> typing.RunStatus: + """Deserialize `RunStatus` from ProtoBuf.""" + return typing.RunStatus( + status=run_status_proto.status, + sub_status=run_status_proto.sub_status, + details=run_status_proto.details, + ) diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index 19e9889158a0..f6e2789ef9c0 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -503,6 +503,7 @@ def test_context_serialization_deserialization() -> None: # Prepare maker = RecordMaker() original = Context( + run_id=0, node_id=1, node_config=maker.user_config(), state=maker.recordset(1, 1, 1), @@ -528,6 +529,11 @@ def test_run_serialization_deserialization() -> None: fab_version="ipsum", fab_hash="hash", override_config=maker.user_config(), + pending_at="2021-01-01T00:00:00Z", + starting_at="2021-01-02T23:02:11Z", + running_at="2021-01-03T12:00:50Z", + finished_at="", + status=typing.RunStatus(status="running", sub_status="", details="OK"), ) # Execute diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index 724f36d2b98f..e4487e839e8f 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -150,12 +150,6 @@ def _generate_next_value_(name: str, start: int, count: int, 
last_values: list[A # Not yet implemented - # --- SuperExec -------------------------------------------------------------------- - - # SuperExec - RUN_SUPEREXEC_ENTER = auto() - RUN_SUPEREXEC_LEAVE = auto() - # --- Simulation Engine ------------------------------------------------------------ # CLI: flower-simulation diff --git a/src/py/flwr/common/typing.py b/src/py/flwr/common/typing.py index 081a957f28ff..3c1ed7fafd85 100644 --- a/src/py/flwr/common/typing.py +++ b/src/py/flwr/common/typing.py @@ -24,7 +24,7 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] -NDArrayFloat = npt.NDArray[np.float_] +NDArrayFloat = npt.NDArray[np.float64] NDArrays = list[NDArray] # The following union type contains Python types corresponding to ProtoBuf types that @@ -208,7 +208,16 @@ class ClientMessage: @dataclass -class Run: +class RunStatus: + """Run status information.""" + + status: str + sub_status: str + details: str + + +@dataclass +class Run: # pylint: disable=too-many-instance-attributes """Run details.""" run_id: int @@ -216,6 +225,27 @@ class Run: fab_version: str fab_hash: str override_config: UserConfig + pending_at: str + starting_at: str + running_at: str + finished_at: str + status: RunStatus + + @classmethod + def create_empty(cls, run_id: int) -> "Run": + """Return an empty Run instance.""" + return cls( + run_id=run_id, + fab_id="", + fab_version="", + fab_hash="", + override_config={}, + pending_at="", + starting_at="", + running_at="", + finished_at="", + status=RunStatus(status="", sub_status="", details=""), + ) @dataclass diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py deleted file mode 100644 index d294b03be5af..000000000000 --- a/src/py/flwr/proto/driver_pb2.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: flwr/proto/driver.proto -# Protobuf Python Version: 4.25.0 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 -from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 -from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc7\x03\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.driver_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _globals['_GETNODESREQUEST']._serialized_start=129 - _globals['_GETNODESREQUEST']._serialized_end=162 - _globals['_GETNODESRESPONSE']._serialized_start=164 - _globals['_GETNODESRESPONSE']._serialized_end=215 - _globals['_PUSHTASKINSREQUEST']._serialized_start=217 - _globals['_PUSHTASKINSREQUEST']._serialized_end=281 - _globals['_PUSHTASKINSRESPONSE']._serialized_start=283 - _globals['_PUSHTASKINSRESPONSE']._serialized_end=322 - _globals['_PULLTASKRESREQUEST']._serialized_start=324 - _globals['_PULLTASKRESREQUEST']._serialized_end=394 - _globals['_PULLTASKRESRESPONSE']._serialized_start=396 - _globals['_PULLTASKRESRESPONSE']._serialized_end=461 - _globals['_DRIVER']._serialized_start=464 - _globals['_DRIVER']._serialized_end=919 -# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2_grpc.py b/src/py/flwr/proto/driver_pb2_grpc.py deleted file mode 100644 index 91e9fd8b9bdd..000000000000 --- a/src/py/flwr/proto/driver_pb2_grpc.py +++ /dev/null @@ -1,239 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from flwr.proto import driver_pb2 as flwr_dot_proto_dot_driver__pb2 -from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 - - -class DriverStub(object): - """Missing associated documentation comment in .proto file.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.CreateRun = channel.unary_unary( - '/flwr.proto.Driver/CreateRun', - request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, - ) - self.GetNodes = channel.unary_unary( - '/flwr.proto.Driver/GetNodes', - request_serializer=flwr_dot_proto_dot_driver__pb2.GetNodesRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.GetNodesResponse.FromString, - ) - self.PushTaskIns = channel.unary_unary( - '/flwr.proto.Driver/PushTaskIns', - request_serializer=flwr_dot_proto_dot_driver__pb2.PushTaskInsRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.PushTaskInsResponse.FromString, - ) - self.PullTaskRes = channel.unary_unary( - '/flwr.proto.Driver/PullTaskRes', - request_serializer=flwr_dot_proto_dot_driver__pb2.PullTaskResRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.PullTaskResResponse.FromString, - ) - self.GetRun = channel.unary_unary( - '/flwr.proto.Driver/GetRun', - request_serializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, - ) - self.GetFab = channel.unary_unary( - '/flwr.proto.Driver/GetFab', - request_serializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, - ) - - -class DriverServicer(object): - 
"""Missing associated documentation comment in .proto file.""" - - def CreateRun(self, request, context): - """Request run_id - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetNodes(self, request, context): - """Return a set of nodes - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def PushTaskIns(self, request, context): - """Create one or more tasks - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def PullTaskRes(self, request, context): - """Get task results - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetRun(self, request, context): - """Get run details - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetFab(self, request, context): - """Get FAB - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_DriverServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreateRun': grpc.unary_unary_rpc_method_handler( - servicer.CreateRun, - request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, - response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, - ), - 'GetNodes': grpc.unary_unary_rpc_method_handler( - servicer.GetNodes, - request_deserializer=flwr_dot_proto_dot_driver__pb2.GetNodesRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.GetNodesResponse.SerializeToString, - ), - 
'PushTaskIns': grpc.unary_unary_rpc_method_handler( - servicer.PushTaskIns, - request_deserializer=flwr_dot_proto_dot_driver__pb2.PushTaskInsRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.PushTaskInsResponse.SerializeToString, - ), - 'PullTaskRes': grpc.unary_unary_rpc_method_handler( - servicer.PullTaskRes, - request_deserializer=flwr_dot_proto_dot_driver__pb2.PullTaskResRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.PullTaskResResponse.SerializeToString, - ), - 'GetRun': grpc.unary_unary_rpc_method_handler( - servicer.GetRun, - request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.FromString, - response_serializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.SerializeToString, - ), - 'GetFab': grpc.unary_unary_rpc_method_handler( - servicer.GetFab, - request_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.FromString, - response_serializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'flwr.proto.Driver', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. 
-class Driver(object): - """Missing associated documentation comment in .proto file.""" - - @staticmethod - def CreateRun(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/CreateRun', - flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, - flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def GetNodes(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/GetNodes', - flwr_dot_proto_dot_driver__pb2.GetNodesRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.GetNodesResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def PushTaskIns(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/PushTaskIns', - flwr_dot_proto_dot_driver__pb2.PushTaskInsRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.PushTaskInsResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def PullTaskRes(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, 
'/flwr.proto.Driver/PullTaskRes', - flwr_dot_proto_dot_driver__pb2.PullTaskResRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.PullTaskResResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def GetRun(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/GetRun', - flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, - flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def GetFab(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/GetFab', - flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, - flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/driver_pb2_grpc.pyi b/src/py/flwr/proto/driver_pb2_grpc.pyi deleted file mode 100644 index 8f665301073d..000000000000 --- a/src/py/flwr/proto/driver_pb2_grpc.pyi +++ /dev/null @@ -1,94 +0,0 @@ -""" -@generated by mypy-protobuf. Do not edit manually! -isort:skip_file -""" -import abc -import flwr.proto.driver_pb2 -import flwr.proto.fab_pb2 -import flwr.proto.run_pb2 -import grpc - -class DriverStub: - def __init__(self, channel: grpc.Channel) -> None: ... 
- CreateRun: grpc.UnaryUnaryMultiCallable[ - flwr.proto.run_pb2.CreateRunRequest, - flwr.proto.run_pb2.CreateRunResponse] - """Request run_id""" - - GetNodes: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.GetNodesRequest, - flwr.proto.driver_pb2.GetNodesResponse] - """Return a set of nodes""" - - PushTaskIns: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.PushTaskInsRequest, - flwr.proto.driver_pb2.PushTaskInsResponse] - """Create one or more tasks""" - - PullTaskRes: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.PullTaskResRequest, - flwr.proto.driver_pb2.PullTaskResResponse] - """Get task results""" - - GetRun: grpc.UnaryUnaryMultiCallable[ - flwr.proto.run_pb2.GetRunRequest, - flwr.proto.run_pb2.GetRunResponse] - """Get run details""" - - GetFab: grpc.UnaryUnaryMultiCallable[ - flwr.proto.fab_pb2.GetFabRequest, - flwr.proto.fab_pb2.GetFabResponse] - """Get FAB""" - - -class DriverServicer(metaclass=abc.ABCMeta): - @abc.abstractmethod - def CreateRun(self, - request: flwr.proto.run_pb2.CreateRunRequest, - context: grpc.ServicerContext, - ) -> flwr.proto.run_pb2.CreateRunResponse: - """Request run_id""" - pass - - @abc.abstractmethod - def GetNodes(self, - request: flwr.proto.driver_pb2.GetNodesRequest, - context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.GetNodesResponse: - """Return a set of nodes""" - pass - - @abc.abstractmethod - def PushTaskIns(self, - request: flwr.proto.driver_pb2.PushTaskInsRequest, - context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.PushTaskInsResponse: - """Create one or more tasks""" - pass - - @abc.abstractmethod - def PullTaskRes(self, - request: flwr.proto.driver_pb2.PullTaskResRequest, - context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.PullTaskResResponse: - """Get task results""" - pass - - @abc.abstractmethod - def GetRun(self, - request: flwr.proto.run_pb2.GetRunRequest, - context: grpc.ServicerContext, - ) -> flwr.proto.run_pb2.GetRunResponse: - """Get run details""" - pass - 
- @abc.abstractmethod - def GetFab(self, - request: flwr.proto.fab_pb2.GetFabRequest, - context: grpc.ServicerContext, - ) -> flwr.proto.fab_pb2.GetFabResponse: - """Get FAB""" - pass - - -def add_DriverServicer_to_server(servicer: DriverServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/exec_pb2.py b/src/py/flwr/proto/exec_pb2.py index 574f39eaa18d..2240988e87a0 100644 --- a/src/py/flwr/proto/exec_pb2.py +++ b/src/py/flwr/proto/exec_pb2.py @@ -14,9 +14,11 @@ from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 +from flwr.proto import recordset_pb2 as flwr_dot_proto_dot_recordset__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xdf\x02\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12L\n\x11\x66\x65\x64\x65ration_config\x18\x03 \x03(\x0b\x32\x31.flwr.proto.StartRunRequest.FederationConfigEntry\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1aK\n\x15\x46\x65\x64\x65rationConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"#\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"(\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t2\xa0\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x62\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x14\x66lwr/proto/run.proto\"\xfb\x01\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12\x35\n\x12\x66\x65\x64\x65ration_options\x18\x03 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"<\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x17\n\x0f\x61\x66ter_timestamp\x18\x02 \x01(\x01\"B\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t\x12\x18\n\x10latest_timestamp\x18\x02 \x01(\x01\"1\n\x0fListRunsRequest\x12\x13\n\x06run_id\x18\x01 \x01(\x04H\x00\x88\x01\x01\x42\t\n\x07_run_id\"\x9d\x01\n\x10ListRunsResponse\x12;\n\x08run_dict\x18\x01 \x03(\x0b\x32).flwr.proto.ListRunsResponse.RunDictEntry\x12\x0b\n\x03now\x18\x02 \x01(\t\x1a?\n\x0cRunDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\x1e\n\x05value\x18\x02 \x01(\x0b\x32\x0f.flwr.proto.Run:\x02\x38\x01\x32\xe9\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x12G\n\x08ListRuns\x12\x1b.flwr.proto.ListRunsRequest\x1a\x1c.flwr.proto.ListRunsResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -25,20 +27,24 @@ DESCRIPTOR._options = None _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._options = None _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' - 
_globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._options = None - _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._serialized_options = b'8\001' - _globals['_STARTRUNREQUEST']._serialized_start=88 - _globals['_STARTRUNREQUEST']._serialized_end=439 - _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=289 - _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=362 - _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._serialized_start=364 - _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._serialized_end=439 - _globals['_STARTRUNRESPONSE']._serialized_start=441 - _globals['_STARTRUNRESPONSE']._serialized_end=475 - _globals['_STREAMLOGSREQUEST']._serialized_start=477 - _globals['_STREAMLOGSREQUEST']._serialized_end=512 - _globals['_STREAMLOGSRESPONSE']._serialized_start=514 - _globals['_STREAMLOGSRESPONSE']._serialized_end=554 - _globals['_EXEC']._serialized_start=557 - _globals['_EXEC']._serialized_end=717 + _globals['_LISTRUNSRESPONSE_RUNDICTENTRY']._options = None + _globals['_LISTRUNSRESPONSE_RUNDICTENTRY']._serialized_options = b'8\001' + _globals['_STARTRUNREQUEST']._serialized_start=138 + _globals['_STARTRUNREQUEST']._serialized_end=389 + _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=316 + _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=389 + _globals['_STARTRUNRESPONSE']._serialized_start=391 + _globals['_STARTRUNRESPONSE']._serialized_end=425 + _globals['_STREAMLOGSREQUEST']._serialized_start=427 + _globals['_STREAMLOGSREQUEST']._serialized_end=487 + _globals['_STREAMLOGSRESPONSE']._serialized_start=489 + _globals['_STREAMLOGSRESPONSE']._serialized_end=555 + _globals['_LISTRUNSREQUEST']._serialized_start=557 + _globals['_LISTRUNSREQUEST']._serialized_end=606 + _globals['_LISTRUNSRESPONSE']._serialized_start=609 + _globals['_LISTRUNSRESPONSE']._serialized_end=766 + _globals['_LISTRUNSRESPONSE_RUNDICTENTRY']._serialized_start=703 + 
_globals['_LISTRUNSRESPONSE_RUNDICTENTRY']._serialized_end=766 + _globals['_EXEC']._serialized_start=769 + _globals['_EXEC']._serialized_end=1002 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/exec_pb2.pyi b/src/py/flwr/proto/exec_pb2.pyi index 8b7e07c8875f..08e0b1c14346 100644 --- a/src/py/flwr/proto/exec_pb2.pyi +++ b/src/py/flwr/proto/exec_pb2.pyi @@ -4,6 +4,8 @@ isort:skip_file """ import builtins import flwr.proto.fab_pb2 +import flwr.proto.recordset_pb2 +import flwr.proto.run_pb2 import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers @@ -30,38 +32,23 @@ class StartRunRequest(google.protobuf.message.Message): def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - class FederationConfigEntry(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - KEY_FIELD_NUMBER: builtins.int - VALUE_FIELD_NUMBER: builtins.int - key: typing.Text - @property - def value(self) -> flwr.proto.transport_pb2.Scalar: ... - def __init__(self, - *, - key: typing.Text = ..., - value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - FAB_FIELD_NUMBER: builtins.int OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int - FEDERATION_CONFIG_FIELD_NUMBER: builtins.int + FEDERATION_OPTIONS_FIELD_NUMBER: builtins.int @property def fab(self) -> flwr.proto.fab_pb2.Fab: ... @property def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... 
@property - def federation_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + def federation_options(self) -> flwr.proto.recordset_pb2.ConfigsRecord: ... def __init__(self, *, fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., - federation_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + federation_options: typing.Optional[flwr.proto.recordset_pb2.ConfigsRecord] = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","federation_config",b"federation_config","override_config",b"override_config"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["fab",b"fab","federation_options",b"federation_options"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","federation_options",b"federation_options","override_config",b"override_config"]) -> None: ... global___StartRunRequest = StartRunRequest class StartRunResponse(google.protobuf.message.Message): @@ -78,21 +65,70 @@ global___StartRunResponse = StartRunResponse class StreamLogsRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RUN_ID_FIELD_NUMBER: builtins.int + AFTER_TIMESTAMP_FIELD_NUMBER: builtins.int run_id: builtins.int + after_timestamp: builtins.float def __init__(self, *, run_id: builtins.int = ..., + after_timestamp: builtins.float = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["after_timestamp",b"after_timestamp","run_id",b"run_id"]) -> None: ... 
global___StreamLogsRequest = StreamLogsRequest class StreamLogsResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor LOG_OUTPUT_FIELD_NUMBER: builtins.int + LATEST_TIMESTAMP_FIELD_NUMBER: builtins.int log_output: typing.Text + latest_timestamp: builtins.float def __init__(self, *, log_output: typing.Text = ..., + latest_timestamp: builtins.float = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["log_output",b"log_output"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["latest_timestamp",b"latest_timestamp","log_output",b"log_output"]) -> None: ... global___StreamLogsResponse = StreamLogsResponse + +class ListRunsRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: typing.Optional[builtins.int] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["_run_id",b"_run_id","run_id",b"run_id"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_run_id",b"_run_id","run_id",b"run_id"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["_run_id",b"_run_id"]) -> typing.Optional[typing_extensions.Literal["run_id"]]: ... +global___ListRunsRequest = ListRunsRequest + +class ListRunsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class RunDictEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int + @property + def value(self) -> flwr.proto.run_pb2.Run: ... + def __init__(self, + *, + key: builtins.int = ..., + value: typing.Optional[flwr.proto.run_pb2.Run] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + RUN_DICT_FIELD_NUMBER: builtins.int + NOW_FIELD_NUMBER: builtins.int + @property + def run_dict(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, flwr.proto.run_pb2.Run]: ... + now: typing.Text + def __init__(self, + *, + run_dict: typing.Optional[typing.Mapping[builtins.int, flwr.proto.run_pb2.Run]] = ..., + now: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["now",b"now","run_dict",b"run_dict"]) -> None: ... +global___ListRunsResponse = ListRunsResponse diff --git a/src/py/flwr/proto/exec_pb2_grpc.py b/src/py/flwr/proto/exec_pb2_grpc.py index 8cf4ce52a300..63f9285fed58 100644 --- a/src/py/flwr/proto/exec_pb2_grpc.py +++ b/src/py/flwr/proto/exec_pb2_grpc.py @@ -24,6 +24,11 @@ def __init__(self, channel): request_serializer=flwr_dot_proto_dot_exec__pb2.StreamLogsRequest.SerializeToString, response_deserializer=flwr_dot_proto_dot_exec__pb2.StreamLogsResponse.FromString, ) + self.ListRuns = channel.unary_unary( + '/flwr.proto.Exec/ListRuns', + request_serializer=flwr_dot_proto_dot_exec__pb2.ListRunsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_exec__pb2.ListRunsResponse.FromString, + ) class ExecServicer(object): @@ -43,6 +48,13 @@ def StreamLogs(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def ListRuns(self, request, context): + """flwr ls command + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_ExecServicer_to_server(servicer, server): rpc_method_handlers = { @@ -56,6 +68,11 @@ def add_ExecServicer_to_server(servicer, server): request_deserializer=flwr_dot_proto_dot_exec__pb2.StreamLogsRequest.FromString, 
response_serializer=flwr_dot_proto_dot_exec__pb2.StreamLogsResponse.SerializeToString, ), + 'ListRuns': grpc.unary_unary_rpc_method_handler( + servicer.ListRuns, + request_deserializer=flwr_dot_proto_dot_exec__pb2.ListRunsRequest.FromString, + response_serializer=flwr_dot_proto_dot_exec__pb2.ListRunsResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'flwr.proto.Exec', rpc_method_handlers) @@ -99,3 +116,20 @@ def StreamLogs(request, flwr_dot_proto_dot_exec__pb2.StreamLogsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ListRuns(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Exec/ListRuns', + flwr_dot_proto_dot_exec__pb2.ListRunsRequest.SerializeToString, + flwr_dot_proto_dot_exec__pb2.ListRunsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/exec_pb2_grpc.pyi b/src/py/flwr/proto/exec_pb2_grpc.pyi index 20da3a53f4a8..550c282bface 100644 --- a/src/py/flwr/proto/exec_pb2_grpc.pyi +++ b/src/py/flwr/proto/exec_pb2_grpc.pyi @@ -19,6 +19,11 @@ class ExecStub: flwr.proto.exec_pb2.StreamLogsResponse] """Start log stream upon request""" + ListRuns: grpc.UnaryUnaryMultiCallable[ + flwr.proto.exec_pb2.ListRunsRequest, + flwr.proto.exec_pb2.ListRunsResponse] + """flwr ls command""" + class ExecServicer(metaclass=abc.ABCMeta): @abc.abstractmethod @@ -37,5 +42,13 @@ class ExecServicer(metaclass=abc.ABCMeta): """Start log stream upon request""" pass + @abc.abstractmethod + def ListRuns(self, + request: flwr.proto.exec_pb2.ListRunsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.exec_pb2.ListRunsResponse: + """flwr ls 
command""" + pass + def add_ExecServicer_to_server(servicer: ExecServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/log_pb2.py b/src/py/flwr/proto/log_pb2.py new file mode 100644 index 000000000000..99aaf26f2217 --- /dev/null +++ b/src/py/flwr/proto/log_pb2.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flwr/proto/log.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/log.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\"O\n\x0fPushLogsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0e\n\x06run_id\x18\x02 \x01(\x04\x12\x0c\n\x04logs\x18\x03 \x03(\t\"\x12\n\x10PushLogsResponseb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.log_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_PUSHLOGSREQUEST']._serialized_start=59 + _globals['_PUSHLOGSREQUEST']._serialized_end=138 + _globals['_PUSHLOGSRESPONSE']._serialized_start=140 + _globals['_PUSHLOGSRESPONSE']._serialized_end=158 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/log_pb2.pyi b/src/py/flwr/proto/log_pb2.pyi new file mode 100644 index 000000000000..fbb621d52b81 --- /dev/null +++ b/src/py/flwr/proto/log_pb2.pyi @@ -0,0 +1,39 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import builtins +import flwr.proto.node_pb2 +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class PushLogsRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + RUN_ID_FIELD_NUMBER: builtins.int + LOGS_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + run_id: builtins.int + @property + def logs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + run_id: builtins.int = ..., + logs: typing.Optional[typing.Iterable[typing.Text]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["logs",b"logs","node",b"node","run_id",b"run_id"]) -> None: ... +global___PushLogsRequest = PushLogsRequest + +class PushLogsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___PushLogsResponse = PushLogsResponse diff --git a/src/py/flwr/proto/log_pb2_grpc.py b/src/py/flwr/proto/log_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/src/py/flwr/proto/log_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/src/py/flwr/proto/log_pb2_grpc.pyi b/src/py/flwr/proto/log_pb2_grpc.pyi new file mode 100644 index 000000000000..f3a5a087ef5d --- /dev/null +++ b/src/py/flwr/proto/log_pb2_grpc.pyi @@ -0,0 +1,4 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" diff --git a/src/py/flwr/proto/message_pb2.py b/src/py/flwr/proto/message_pb2.py index d2201cb07b56..92e37d3b7ed4 100644 --- a/src/py/flwr/proto/message_pb2.py +++ b/src/py/flwr/proto/message_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 \x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xbf\x02\n\x07\x43ontext\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x38\n\x0bnode_config\x18\x02 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x03 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x04 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x04\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x04\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 \x01(\t\x12\x12\n\ncreated_at\x18\t \x01(\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 
\x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xcf\x02\n\x07\x43ontext\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0f\n\x07node_id\x18\x02 \x01(\x04\x12\x38\n\x0bnode_config\x18\x03 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x04 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x05 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x04\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x04\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 \x01(\t\x12\x12\n\ncreated_at\x18\t \x01(\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -31,11 +31,11 @@ _globals['_MESSAGE']._serialized_start=120 _globals['_MESSAGE']._serialized_end=243 _globals['_CONTEXT']._serialized_start=246 - _globals['_CONTEXT']._serialized_end=565 - _globals['_CONTEXT_NODECONFIGENTRY']._serialized_start=426 - _globals['_CONTEXT_NODECONFIGENTRY']._serialized_end=495 - _globals['_CONTEXT_RUNCONFIGENTRY']._serialized_start=497 - _globals['_CONTEXT_RUNCONFIGENTRY']._serialized_end=565 - _globals['_METADATA']._serialized_start=568 - _globals['_METADATA']._serialized_end=755 + _globals['_CONTEXT']._serialized_end=581 + _globals['_CONTEXT_NODECONFIGENTRY']._serialized_start=442 + _globals['_CONTEXT_NODECONFIGENTRY']._serialized_end=511 + _globals['_CONTEXT_RUNCONFIGENTRY']._serialized_start=513 + 
_globals['_CONTEXT_RUNCONFIGENTRY']._serialized_end=581 + _globals['_METADATA']._serialized_start=584 + _globals['_METADATA']._serialized_end=771 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/message_pb2.pyi b/src/py/flwr/proto/message_pb2.pyi index b352917f217e..766829a4798c 100644 --- a/src/py/flwr/proto/message_pb2.pyi +++ b/src/py/flwr/proto/message_pb2.pyi @@ -67,10 +67,12 @@ class Context(google.protobuf.message.Message): def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + RUN_ID_FIELD_NUMBER: builtins.int NODE_ID_FIELD_NUMBER: builtins.int NODE_CONFIG_FIELD_NUMBER: builtins.int STATE_FIELD_NUMBER: builtins.int RUN_CONFIG_FIELD_NUMBER: builtins.int + run_id: builtins.int node_id: builtins.int @property def node_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... @@ -80,13 +82,14 @@ class Context(google.protobuf.message.Message): def run_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... def __init__(self, *, + run_id: builtins.int = ..., node_id: builtins.int = ..., node_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., state: typing.Optional[flwr.proto.recordset_pb2.RecordSet] = ..., run_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["state",b"state"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["node_config",b"node_config","node_id",b"node_id","run_config",b"run_config","state",b"state"]) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["node_config",b"node_config","node_id",b"node_id","run_config",b"run_config","run_id",b"run_id","state",b"state"]) -> None: ... global___Context = Context class Metadata(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/run_pb2.py b/src/py/flwr/proto/run_pb2.py index cc3f6897918f..a3aac417f9a9 100644 --- a/src/py/flwr/proto/run_pb2.py +++ b/src/py/flwr/proto/run_pb2.py @@ -14,10 +14,11 @@ from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 +from flwr.proto import recordset_pb2 as flwr_dot_proto_dot_recordset__pb2 from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 \x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 
\x01(\x04\"?\n\rGetRunRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0e\n\x06run_id\x18\x02 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"F\n\x13GetRunStatusRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0f\n\x07run_ids\x18\x02 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xce\x02\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x12\x12\n\npending_at\x18\x06 \x01(\t\x12\x13\n\x0bstarting_at\x18\x07 \x01(\t\x12\x12\n\nrunning_at\x18\x08 \x01(\t\x12\x13\n\x0b\x66inished_at\x18\t \x01(\t\x12%\n\x06status\x18\n \x01(\x0b\x32\x15.flwr.proto.RunStatus\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 
\x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"?\n\rGetRunRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0e\n\x06run_id\x18\x02 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"F\n\x13GetRunStatusRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0f\n\x07run_ids\x18\x02 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\"-\n\x1bGetFederationOptionsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"U\n\x1cGetFederationOptionsResponse\x12\x35\n\x12\x66\x65\x64\x65ration_options\x18\x01 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecordb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -30,30 +31,34 @@ _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._options = None _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_options = b'8\001' - _globals['_RUN']._serialized_start=110 - _globals['_RUN']._serialized_end=323 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=250 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=323 - _globals['_RUNSTATUS']._serialized_start=325 - _globals['_RUNSTATUS']._serialized_end=389 - 
_globals['_CREATERUNREQUEST']._serialized_start=392 - _globals['_CREATERUNREQUEST']._serialized_end=627 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=250 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=323 - _globals['_CREATERUNRESPONSE']._serialized_start=629 - _globals['_CREATERUNRESPONSE']._serialized_end=664 - _globals['_GETRUNREQUEST']._serialized_start=666 - _globals['_GETRUNREQUEST']._serialized_end=729 - _globals['_GETRUNRESPONSE']._serialized_start=731 - _globals['_GETRUNRESPONSE']._serialized_end=777 - _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=779 - _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=862 - _globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=864 - _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=889 - _globals['_GETRUNSTATUSREQUEST']._serialized_start=891 - _globals['_GETRUNSTATUSREQUEST']._serialized_end=961 - _globals['_GETRUNSTATUSRESPONSE']._serialized_start=964 - _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1141 - _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=1066 - _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1141 + _globals['_RUN']._serialized_start=138 + _globals['_RUN']._serialized_end=472 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=399 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=472 + _globals['_RUNSTATUS']._serialized_start=474 + _globals['_RUNSTATUS']._serialized_end=538 + _globals['_CREATERUNREQUEST']._serialized_start=541 + _globals['_CREATERUNREQUEST']._serialized_end=776 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=399 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=472 + _globals['_CREATERUNRESPONSE']._serialized_start=778 + _globals['_CREATERUNRESPONSE']._serialized_end=813 + _globals['_GETRUNREQUEST']._serialized_start=815 + _globals['_GETRUNREQUEST']._serialized_end=878 + _globals['_GETRUNRESPONSE']._serialized_start=880 + 
_globals['_GETRUNRESPONSE']._serialized_end=926 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=928 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=1011 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=1013 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=1038 + _globals['_GETRUNSTATUSREQUEST']._serialized_start=1040 + _globals['_GETRUNSTATUSREQUEST']._serialized_end=1110 + _globals['_GETRUNSTATUSRESPONSE']._serialized_start=1113 + _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1290 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=1215 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1290 + _globals['_GETFEDERATIONOPTIONSREQUEST']._serialized_start=1292 + _globals['_GETFEDERATIONOPTIONSREQUEST']._serialized_end=1337 + _globals['_GETFEDERATIONOPTIONSRESPONSE']._serialized_start=1339 + _globals['_GETFEDERATIONOPTIONSRESPONSE']._serialized_end=1424 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/run_pb2.pyi b/src/py/flwr/proto/run_pb2.pyi index 16411712eaf2..cbaad46f2785 100644 --- a/src/py/flwr/proto/run_pb2.pyi +++ b/src/py/flwr/proto/run_pb2.pyi @@ -5,6 +5,7 @@ isort:skip_file import builtins import flwr.proto.fab_pb2 import flwr.proto.node_pb2 +import flwr.proto.recordset_pb2 import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers @@ -36,12 +37,23 @@ class Run(google.protobuf.message.Message): FAB_VERSION_FIELD_NUMBER: builtins.int OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int FAB_HASH_FIELD_NUMBER: builtins.int + PENDING_AT_FIELD_NUMBER: builtins.int + STARTING_AT_FIELD_NUMBER: builtins.int + RUNNING_AT_FIELD_NUMBER: builtins.int + FINISHED_AT_FIELD_NUMBER: builtins.int + STATUS_FIELD_NUMBER: builtins.int run_id: builtins.int fab_id: typing.Text fab_version: typing.Text @property def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... 
fab_hash: typing.Text + pending_at: typing.Text + starting_at: typing.Text + running_at: typing.Text + finished_at: typing.Text + @property + def status(self) -> global___RunStatus: ... def __init__(self, *, run_id: builtins.int = ..., @@ -49,8 +61,14 @@ class Run(google.protobuf.message.Message): fab_version: typing.Text = ..., override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., fab_hash: typing.Text = ..., + pending_at: typing.Text = ..., + starting_at: typing.Text = ..., + running_at: typing.Text = ..., + finished_at: typing.Text = ..., + status: typing.Optional[global___RunStatus] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["fab_hash",b"fab_hash","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config","run_id",b"run_id"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["status",b"status"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["fab_hash",b"fab_hash","fab_id",b"fab_id","fab_version",b"fab_version","finished_at",b"finished_at","override_config",b"override_config","pending_at",b"pending_at","run_id",b"run_id","running_at",b"running_at","starting_at",b"starting_at","status",b"status"]) -> None: ... global___Run = Run class RunStatus(google.protobuf.message.Message): @@ -223,3 +241,28 @@ class GetRunStatusResponse(google.protobuf.message.Message): ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["run_status_dict",b"run_status_dict"]) -> None: ... global___GetRunStatusResponse = GetRunStatusResponse + +class GetFederationOptionsRequest(google.protobuf.message.Message): + """Get Federation Options associated with run""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___GetFederationOptionsRequest = GetFederationOptionsRequest + +class GetFederationOptionsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + FEDERATION_OPTIONS_FIELD_NUMBER: builtins.int + @property + def federation_options(self) -> flwr.proto.recordset_pb2.ConfigsRecord: ... + def __init__(self, + *, + federation_options: typing.Optional[flwr.proto.recordset_pb2.ConfigsRecord] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["federation_options",b"federation_options"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["federation_options",b"federation_options"]) -> None: ... +global___GetFederationOptionsResponse = GetFederationOptionsResponse diff --git a/src/py/flwr/proto/serverappio_pb2.py b/src/py/flwr/proto/serverappio_pb2.py new file mode 100644 index 000000000000..2bbd33b5c42b --- /dev/null +++ b/src/py/flwr/proto/serverappio_pb2.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flwr/proto/serverappio.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import log_pb2 as flwr_dot_proto_dot_log__pb2 +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 +from flwr.proto import message_pb2 as flwr_dot_proto_dot_message__pb2 +from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lwr/proto/serverappio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/log.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x18\x66lwr/proto/message.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\x1c\n\x1aPullServerAppInputsRequest\"\x7f\n\x1bPullServerAppInputsResponse\x12$\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x02 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Fab\"S\n\x1bPushServerAppOutputsRequest\x12\x0e\n\x06run_id\x18\x01 
\x01(\x04\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Context\"\x1e\n\x1cPushServerAppOutputsResponse2\xca\x06\n\x0bServerAppIo\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x12h\n\x13PullServerAppInputs\x12&.flwr.proto.PullServerAppInputsRequest\x1a\'.flwr.proto.PullServerAppInputsResponse\"\x00\x12k\n\x14PushServerAppOutputs\x12\'.flwr.proto.PushServerAppOutputsRequest\x1a(.flwr.proto.PushServerAppOutputsResponse\"\x00\x12\\\n\x0fUpdateRunStatus\x12\".flwr.proto.UpdateRunStatusRequest\x1a#.flwr.proto.UpdateRunStatusResponse\"\x00\x12G\n\x08PushLogs\x12\x1b.flwr.proto.PushLogsRequest\x1a\x1c.flwr.proto.PushLogsResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.serverappio_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_GETNODESREQUEST']._serialized_start=182 + _globals['_GETNODESREQUEST']._serialized_end=215 + _globals['_GETNODESRESPONSE']._serialized_start=217 + _globals['_GETNODESRESPONSE']._serialized_end=268 + _globals['_PUSHTASKINSREQUEST']._serialized_start=270 + _globals['_PUSHTASKINSREQUEST']._serialized_end=334 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=336 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=375 + _globals['_PULLTASKRESREQUEST']._serialized_start=377 + 
_globals['_PULLTASKRESREQUEST']._serialized_end=447 + _globals['_PULLTASKRESRESPONSE']._serialized_start=449 + _globals['_PULLTASKRESRESPONSE']._serialized_end=514 + _globals['_PULLSERVERAPPINPUTSREQUEST']._serialized_start=516 + _globals['_PULLSERVERAPPINPUTSREQUEST']._serialized_end=544 + _globals['_PULLSERVERAPPINPUTSRESPONSE']._serialized_start=546 + _globals['_PULLSERVERAPPINPUTSRESPONSE']._serialized_end=673 + _globals['_PUSHSERVERAPPOUTPUTSREQUEST']._serialized_start=675 + _globals['_PUSHSERVERAPPOUTPUTSREQUEST']._serialized_end=758 + _globals['_PUSHSERVERAPPOUTPUTSRESPONSE']._serialized_start=760 + _globals['_PUSHSERVERAPPOUTPUTSRESPONSE']._serialized_end=790 + _globals['_SERVERAPPIO']._serialized_start=793 + _globals['_SERVERAPPIO']._serialized_end=1635 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/serverappio_pb2.pyi similarity index 62% rename from src/py/flwr/proto/driver_pb2.pyi rename to src/py/flwr/proto/serverappio_pb2.pyi index 77ceb496d70c..8191ec663442 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/serverappio_pb2.pyi @@ -3,7 +3,10 @@ isort:skip_file """ import builtins +import flwr.proto.fab_pb2 +import flwr.proto.message_pb2 import flwr.proto.node_pb2 +import flwr.proto.run_pb2 import flwr.proto.task_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers @@ -91,3 +94,54 @@ class PullTaskResResponse(google.protobuf.message.Message): ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["task_res_list",b"task_res_list"]) -> None: ... global___PullTaskResResponse = PullTaskResResponse + +class PullServerAppInputsRequest(google.protobuf.message.Message): + """PullServerAppInputs messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... 
+global___PullServerAppInputsRequest = PullServerAppInputsRequest + +class PullServerAppInputsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + CONTEXT_FIELD_NUMBER: builtins.int + RUN_FIELD_NUMBER: builtins.int + FAB_FIELD_NUMBER: builtins.int + @property + def context(self) -> flwr.proto.message_pb2.Context: ... + @property + def run(self) -> flwr.proto.run_pb2.Run: ... + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... + def __init__(self, + *, + context: typing.Optional[flwr.proto.message_pb2.Context] = ..., + run: typing.Optional[flwr.proto.run_pb2.Run] = ..., + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["context",b"context","fab",b"fab","run",b"run"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["context",b"context","fab",b"fab","run",b"run"]) -> None: ... +global___PullServerAppInputsResponse = PullServerAppInputsResponse + +class PushServerAppOutputsRequest(google.protobuf.message.Message): + """PushServerAppOutputs messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + CONTEXT_FIELD_NUMBER: builtins.int + run_id: builtins.int + @property + def context(self) -> flwr.proto.message_pb2.Context: ... + def __init__(self, + *, + run_id: builtins.int = ..., + context: typing.Optional[flwr.proto.message_pb2.Context] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["context",b"context"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["context",b"context","run_id",b"run_id"]) -> None: ... +global___PushServerAppOutputsRequest = PushServerAppOutputsRequest + +class PushServerAppOutputsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... 
+global___PushServerAppOutputsResponse = PushServerAppOutputsResponse diff --git a/src/py/flwr/proto/serverappio_pb2_grpc.py b/src/py/flwr/proto/serverappio_pb2_grpc.py new file mode 100644 index 000000000000..1a7740db4271 --- /dev/null +++ b/src/py/flwr/proto/serverappio_pb2_grpc.py @@ -0,0 +1,376 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import log_pb2 as flwr_dot_proto_dot_log__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import serverappio_pb2 as flwr_dot_proto_dot_serverappio__pb2 + + +class ServerAppIoStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateRun = channel.unary_unary( + '/flwr.proto.ServerAppIo/CreateRun', + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + ) + self.GetNodes = channel.unary_unary( + '/flwr.proto.ServerAppIo/GetNodes', + request_serializer=flwr_dot_proto_dot_serverappio__pb2.GetNodesRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_serverappio__pb2.GetNodesResponse.FromString, + ) + self.PushTaskIns = channel.unary_unary( + '/flwr.proto.ServerAppIo/PushTaskIns', + request_serializer=flwr_dot_proto_dot_serverappio__pb2.PushTaskInsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_serverappio__pb2.PushTaskInsResponse.FromString, + ) + self.PullTaskRes = channel.unary_unary( + '/flwr.proto.ServerAppIo/PullTaskRes', + request_serializer=flwr_dot_proto_dot_serverappio__pb2.PullTaskResRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_serverappio__pb2.PullTaskResResponse.FromString, + ) + 
self.GetRun = channel.unary_unary( + '/flwr.proto.ServerAppIo/GetRun', + request_serializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, + ) + self.GetFab = channel.unary_unary( + '/flwr.proto.ServerAppIo/GetFab', + request_serializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, + ) + self.PullServerAppInputs = channel.unary_unary( + '/flwr.proto.ServerAppIo/PullServerAppInputs', + request_serializer=flwr_dot_proto_dot_serverappio__pb2.PullServerAppInputsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_serverappio__pb2.PullServerAppInputsResponse.FromString, + ) + self.PushServerAppOutputs = channel.unary_unary( + '/flwr.proto.ServerAppIo/PushServerAppOutputs', + request_serializer=flwr_dot_proto_dot_serverappio__pb2.PushServerAppOutputsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_serverappio__pb2.PushServerAppOutputsResponse.FromString, + ) + self.UpdateRunStatus = channel.unary_unary( + '/flwr.proto.ServerAppIo/UpdateRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + ) + self.PushLogs = channel.unary_unary( + '/flwr.proto.ServerAppIo/PushLogs', + request_serializer=flwr_dot_proto_dot_log__pb2.PushLogsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_log__pb2.PushLogsResponse.FromString, + ) + + +class ServerAppIoServicer(object): + """Missing associated documentation comment in .proto file.""" + + def CreateRun(self, request, context): + """Request run_id + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetNodes(self, request, context): + """Return a set 
of nodes + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushTaskIns(self, request, context): + """Create one or more tasks + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PullTaskRes(self, request, context): + """Get task results + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetRun(self, request, context): + """Get run details + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetFab(self, request, context): + """Get FAB + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PullServerAppInputs(self, request, context): + """Pull ServerApp inputs + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushServerAppOutputs(self, request, context): + """Push ServerApp outputs + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateRunStatus(self, request, context): + """Update the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushLogs(self, request, context): + """Push ServerApp logs + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise 
NotImplementedError('Method not implemented!') + + +def add_ServerAppIoServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateRun': grpc.unary_unary_rpc_method_handler( + servicer.CreateRun, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, + ), + 'GetNodes': grpc.unary_unary_rpc_method_handler( + servicer.GetNodes, + request_deserializer=flwr_dot_proto_dot_serverappio__pb2.GetNodesRequest.FromString, + response_serializer=flwr_dot_proto_dot_serverappio__pb2.GetNodesResponse.SerializeToString, + ), + 'PushTaskIns': grpc.unary_unary_rpc_method_handler( + servicer.PushTaskIns, + request_deserializer=flwr_dot_proto_dot_serverappio__pb2.PushTaskInsRequest.FromString, + response_serializer=flwr_dot_proto_dot_serverappio__pb2.PushTaskInsResponse.SerializeToString, + ), + 'PullTaskRes': grpc.unary_unary_rpc_method_handler( + servicer.PullTaskRes, + request_deserializer=flwr_dot_proto_dot_serverappio__pb2.PullTaskResRequest.FromString, + response_serializer=flwr_dot_proto_dot_serverappio__pb2.PullTaskResResponse.SerializeToString, + ), + 'GetRun': grpc.unary_unary_rpc_method_handler( + servicer.GetRun, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.SerializeToString, + ), + 'GetFab': grpc.unary_unary_rpc_method_handler( + servicer.GetFab, + request_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.FromString, + response_serializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.SerializeToString, + ), + 'PullServerAppInputs': grpc.unary_unary_rpc_method_handler( + servicer.PullServerAppInputs, + request_deserializer=flwr_dot_proto_dot_serverappio__pb2.PullServerAppInputsRequest.FromString, + response_serializer=flwr_dot_proto_dot_serverappio__pb2.PullServerAppInputsResponse.SerializeToString, + ), + 'PushServerAppOutputs': 
grpc.unary_unary_rpc_method_handler( + servicer.PushServerAppOutputs, + request_deserializer=flwr_dot_proto_dot_serverappio__pb2.PushServerAppOutputsRequest.FromString, + response_serializer=flwr_dot_proto_dot_serverappio__pb2.PushServerAppOutputsResponse.SerializeToString, + ), + 'UpdateRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.UpdateRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.SerializeToString, + ), + 'PushLogs': grpc.unary_unary_rpc_method_handler( + servicer.PushLogs, + request_deserializer=flwr_dot_proto_dot_log__pb2.PushLogsRequest.FromString, + response_serializer=flwr_dot_proto_dot_log__pb2.PushLogsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.ServerAppIo', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class ServerAppIo(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def CreateRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/CreateRun', + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetNodes(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/GetNodes', + flwr_dot_proto_dot_serverappio__pb2.GetNodesRequest.SerializeToString, + flwr_dot_proto_dot_serverappio__pb2.GetNodesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PushTaskIns(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/PushTaskIns', + flwr_dot_proto_dot_serverappio__pb2.PushTaskInsRequest.SerializeToString, + flwr_dot_proto_dot_serverappio__pb2.PushTaskInsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PullTaskRes(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return 
grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/PullTaskRes', + flwr_dot_proto_dot_serverappio__pb2.PullTaskResRequest.SerializeToString, + flwr_dot_proto_dot_serverappio__pb2.PullTaskResResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/GetRun', + flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetFab(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/GetFab', + flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, + flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PullServerAppInputs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/PullServerAppInputs', + flwr_dot_proto_dot_serverappio__pb2.PullServerAppInputsRequest.SerializeToString, + flwr_dot_proto_dot_serverappio__pb2.PullServerAppInputsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, 
metadata) + + @staticmethod + def PushServerAppOutputs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/PushServerAppOutputs', + flwr_dot_proto_dot_serverappio__pb2.PushServerAppOutputsRequest.SerializeToString, + flwr_dot_proto_dot_serverappio__pb2.PushServerAppOutputsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/UpdateRunStatus', + flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PushLogs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ServerAppIo/PushLogs', + flwr_dot_proto_dot_log__pb2.PushLogsRequest.SerializeToString, + flwr_dot_proto_dot_log__pb2.PushLogsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/serverappio_pb2_grpc.pyi b/src/py/flwr/proto/serverappio_pb2_grpc.pyi new file mode 100644 index 000000000000..aa2d29473ae8 --- /dev/null +++ b/src/py/flwr/proto/serverappio_pb2_grpc.pyi @@ -0,0 +1,147 @@ +""" +@generated by mypy-protobuf. 
Do not edit manually! +isort:skip_file +""" +import abc +import flwr.proto.fab_pb2 +import flwr.proto.log_pb2 +import flwr.proto.run_pb2 +import flwr.proto.serverappio_pb2 +import grpc + +class ServerAppIoStub: + def __init__(self, channel: grpc.Channel) -> None: ... + CreateRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] + """Request run_id""" + + GetNodes: grpc.UnaryUnaryMultiCallable[ + flwr.proto.serverappio_pb2.GetNodesRequest, + flwr.proto.serverappio_pb2.GetNodesResponse] + """Return a set of nodes""" + + PushTaskIns: grpc.UnaryUnaryMultiCallable[ + flwr.proto.serverappio_pb2.PushTaskInsRequest, + flwr.proto.serverappio_pb2.PushTaskInsResponse] + """Create one or more tasks""" + + PullTaskRes: grpc.UnaryUnaryMultiCallable[ + flwr.proto.serverappio_pb2.PullTaskResRequest, + flwr.proto.serverappio_pb2.PullTaskResResponse] + """Get task results""" + + GetRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.GetRunRequest, + flwr.proto.run_pb2.GetRunResponse] + """Get run details""" + + GetFab: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fab_pb2.GetFabRequest, + flwr.proto.fab_pb2.GetFabResponse] + """Get FAB""" + + PullServerAppInputs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.serverappio_pb2.PullServerAppInputsRequest, + flwr.proto.serverappio_pb2.PullServerAppInputsResponse] + """Pull ServerApp inputs""" + + PushServerAppOutputs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.serverappio_pb2.PushServerAppOutputsRequest, + flwr.proto.serverappio_pb2.PushServerAppOutputsResponse] + """Push ServerApp outputs""" + + UpdateRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.UpdateRunStatusRequest, + flwr.proto.run_pb2.UpdateRunStatusResponse] + """Update the status of a given run""" + + PushLogs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.log_pb2.PushLogsRequest, + flwr.proto.log_pb2.PushLogsResponse] + """Push ServerApp logs""" + + +class ServerAppIoServicer(metaclass=abc.ABCMeta): + 
@abc.abstractmethod + def CreateRun(self, + request: flwr.proto.run_pb2.CreateRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.CreateRunResponse: + """Request run_id""" + pass + + @abc.abstractmethod + def GetNodes(self, + request: flwr.proto.serverappio_pb2.GetNodesRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.serverappio_pb2.GetNodesResponse: + """Return a set of nodes""" + pass + + @abc.abstractmethod + def PushTaskIns(self, + request: flwr.proto.serverappio_pb2.PushTaskInsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.serverappio_pb2.PushTaskInsResponse: + """Create one or more tasks""" + pass + + @abc.abstractmethod + def PullTaskRes(self, + request: flwr.proto.serverappio_pb2.PullTaskResRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.serverappio_pb2.PullTaskResResponse: + """Get task results""" + pass + + @abc.abstractmethod + def GetRun(self, + request: flwr.proto.run_pb2.GetRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.GetRunResponse: + """Get run details""" + pass + + @abc.abstractmethod + def GetFab(self, + request: flwr.proto.fab_pb2.GetFabRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fab_pb2.GetFabResponse: + """Get FAB""" + pass + + @abc.abstractmethod + def PullServerAppInputs(self, + request: flwr.proto.serverappio_pb2.PullServerAppInputsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.serverappio_pb2.PullServerAppInputsResponse: + """Pull ServerApp inputs""" + pass + + @abc.abstractmethod + def PushServerAppOutputs(self, + request: flwr.proto.serverappio_pb2.PushServerAppOutputsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.serverappio_pb2.PushServerAppOutputsResponse: + """Push ServerApp outputs""" + pass + + @abc.abstractmethod + def UpdateRunStatus(self, + request: flwr.proto.run_pb2.UpdateRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.UpdateRunStatusResponse: + """Update the status of a given 
run""" + pass + + @abc.abstractmethod + def PushLogs(self, + request: flwr.proto.log_pb2.PushLogsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.log_pb2.PushLogsResponse: + """Push ServerApp logs""" + pass + + +def add_ServerAppIoServicer_to_server(servicer: ServerAppIoServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/simulationio_pb2.py b/src/py/flwr/proto/simulationio_pb2.py new file mode 100644 index 000000000000..c7392471f58d --- /dev/null +++ b/src/py/flwr/proto/simulationio_pb2.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flwr/proto/simulationio.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import log_pb2 as flwr_dot_proto_dot_log__pb2 +from flwr.proto import message_pb2 as flwr_dot_proto_dot_message__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1d\x66lwr/proto/simulationio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/log.proto\x1a\x18\x66lwr/proto/message.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"\x1d\n\x1bPullSimulationInputsRequest\"\x80\x01\n\x1cPullSimulationInputsResponse\x12$\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x02 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Fab\"T\n\x1cPushSimulationOutputsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12$\n\x07\x63ontext\x18\x02 
\x01(\x0b\x32\x13.flwr.proto.Context\"\x1f\n\x1dPushSimulationOutputsResponse2\xff\x03\n\x0cSimulationIo\x12k\n\x14PullSimulationInputs\x12\'.flwr.proto.PullSimulationInputsRequest\x1a(.flwr.proto.PullSimulationInputsResponse\"\x00\x12n\n\x15PushSimulationOutputs\x12(.flwr.proto.PushSimulationOutputsRequest\x1a).flwr.proto.PushSimulationOutputsResponse\"\x00\x12\\\n\x0fUpdateRunStatus\x12\".flwr.proto.UpdateRunStatusRequest\x1a#.flwr.proto.UpdateRunStatusResponse\"\x00\x12G\n\x08PushLogs\x12\x1b.flwr.proto.PushLogsRequest\x1a\x1c.flwr.proto.PushLogsResponse\"\x00\x12k\n\x14GetFederationOptions\x12\'.flwr.proto.GetFederationOptionsRequest\x1a(.flwr.proto.GetFederationOptionsResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.simulationio_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_PULLSIMULATIONINPUTSREQUEST']._serialized_start=137 + _globals['_PULLSIMULATIONINPUTSREQUEST']._serialized_end=166 + _globals['_PULLSIMULATIONINPUTSRESPONSE']._serialized_start=169 + _globals['_PULLSIMULATIONINPUTSRESPONSE']._serialized_end=297 + _globals['_PUSHSIMULATIONOUTPUTSREQUEST']._serialized_start=299 + _globals['_PUSHSIMULATIONOUTPUTSREQUEST']._serialized_end=383 + _globals['_PUSHSIMULATIONOUTPUTSRESPONSE']._serialized_start=385 + _globals['_PUSHSIMULATIONOUTPUTSRESPONSE']._serialized_end=416 + _globals['_SIMULATIONIO']._serialized_start=419 + _globals['_SIMULATIONIO']._serialized_end=930 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/simulationio_pb2.pyi b/src/py/flwr/proto/simulationio_pb2.pyi new file mode 100644 index 000000000000..648bd0b71f21 --- /dev/null +++ b/src/py/flwr/proto/simulationio_pb2.pyi @@ -0,0 +1,65 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import builtins +import flwr.proto.fab_pb2 +import flwr.proto.message_pb2 +import flwr.proto.run_pb2 +import google.protobuf.descriptor +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class PullSimulationInputsRequest(google.protobuf.message.Message): + """PullSimulationInputs messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___PullSimulationInputsRequest = PullSimulationInputsRequest + +class PullSimulationInputsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + CONTEXT_FIELD_NUMBER: builtins.int + RUN_FIELD_NUMBER: builtins.int + FAB_FIELD_NUMBER: builtins.int + @property + def context(self) -> flwr.proto.message_pb2.Context: ... + @property + def run(self) -> flwr.proto.run_pb2.Run: ... + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... + def __init__(self, + *, + context: typing.Optional[flwr.proto.message_pb2.Context] = ..., + run: typing.Optional[flwr.proto.run_pb2.Run] = ..., + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["context",b"context","fab",b"fab","run",b"run"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["context",b"context","fab",b"fab","run",b"run"]) -> None: ... +global___PullSimulationInputsResponse = PullSimulationInputsResponse + +class PushSimulationOutputsRequest(google.protobuf.message.Message): + """PushSimulationOutputs messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + CONTEXT_FIELD_NUMBER: builtins.int + run_id: builtins.int + @property + def context(self) -> flwr.proto.message_pb2.Context: ... + def __init__(self, + *, + run_id: builtins.int = ..., + context: typing.Optional[flwr.proto.message_pb2.Context] = ..., + ) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["context",b"context"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["context",b"context","run_id",b"run_id"]) -> None: ... +global___PushSimulationOutputsRequest = PushSimulationOutputsRequest + +class PushSimulationOutputsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___PushSimulationOutputsResponse = PushSimulationOutputsResponse diff --git a/src/py/flwr/proto/simulationio_pb2_grpc.py b/src/py/flwr/proto/simulationio_pb2_grpc.py new file mode 100644 index 000000000000..b36110253ba2 --- /dev/null +++ b/src/py/flwr/proto/simulationio_pb2_grpc.py @@ -0,0 +1,205 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import log_pb2 as flwr_dot_proto_dot_log__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import simulationio_pb2 as flwr_dot_proto_dot_simulationio__pb2 + + +class SimulationIoStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.PullSimulationInputs = channel.unary_unary( + '/flwr.proto.SimulationIo/PullSimulationInputs', + request_serializer=flwr_dot_proto_dot_simulationio__pb2.PullSimulationInputsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_simulationio__pb2.PullSimulationInputsResponse.FromString, + ) + self.PushSimulationOutputs = channel.unary_unary( + '/flwr.proto.SimulationIo/PushSimulationOutputs', + request_serializer=flwr_dot_proto_dot_simulationio__pb2.PushSimulationOutputsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_simulationio__pb2.PushSimulationOutputsResponse.FromString, + ) + self.UpdateRunStatus = channel.unary_unary( + '/flwr.proto.SimulationIo/UpdateRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + ) + self.PushLogs = channel.unary_unary( + '/flwr.proto.SimulationIo/PushLogs', + request_serializer=flwr_dot_proto_dot_log__pb2.PushLogsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_log__pb2.PushLogsResponse.FromString, + ) + self.GetFederationOptions = channel.unary_unary( + '/flwr.proto.SimulationIo/GetFederationOptions', + request_serializer=flwr_dot_proto_dot_run__pb2.GetFederationOptionsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetFederationOptionsResponse.FromString, + ) + + +class SimulationIoServicer(object): + """Missing associated documentation comment in .proto file.""" + + def PullSimulationInputs(self, request, context): + """Pull Simulation inputs + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushSimulationOutputs(self, request, context): + """Push Simulation outputs + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise 
NotImplementedError('Method not implemented!') + + def UpdateRunStatus(self, request, context): + """Update the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushLogs(self, request, context): + """Push ServerApp logs + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetFederationOptions(self, request, context): + """Get Federation Options + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_SimulationIoServicer_to_server(servicer, server): + rpc_method_handlers = { + 'PullSimulationInputs': grpc.unary_unary_rpc_method_handler( + servicer.PullSimulationInputs, + request_deserializer=flwr_dot_proto_dot_simulationio__pb2.PullSimulationInputsRequest.FromString, + response_serializer=flwr_dot_proto_dot_simulationio__pb2.PullSimulationInputsResponse.SerializeToString, + ), + 'PushSimulationOutputs': grpc.unary_unary_rpc_method_handler( + servicer.PushSimulationOutputs, + request_deserializer=flwr_dot_proto_dot_simulationio__pb2.PushSimulationOutputsRequest.FromString, + response_serializer=flwr_dot_proto_dot_simulationio__pb2.PushSimulationOutputsResponse.SerializeToString, + ), + 'UpdateRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.UpdateRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.SerializeToString, + ), + 'PushLogs': grpc.unary_unary_rpc_method_handler( + servicer.PushLogs, + request_deserializer=flwr_dot_proto_dot_log__pb2.PushLogsRequest.FromString, + response_serializer=flwr_dot_proto_dot_log__pb2.PushLogsResponse.SerializeToString, + ), + 
'GetFederationOptions': grpc.unary_unary_rpc_method_handler( + servicer.GetFederationOptions, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetFederationOptionsRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.GetFederationOptionsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.SimulationIo', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class SimulationIo(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def PullSimulationInputs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.SimulationIo/PullSimulationInputs', + flwr_dot_proto_dot_simulationio__pb2.PullSimulationInputsRequest.SerializeToString, + flwr_dot_proto_dot_simulationio__pb2.PullSimulationInputsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PushSimulationOutputs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.SimulationIo/PushSimulationOutputs', + flwr_dot_proto_dot_simulationio__pb2.PushSimulationOutputsRequest.SerializeToString, + flwr_dot_proto_dot_simulationio__pb2.PushSimulationOutputsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + 
wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.SimulationIo/UpdateRunStatus', + flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PushLogs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.SimulationIo/PushLogs', + flwr_dot_proto_dot_log__pb2.PushLogsRequest.SerializeToString, + flwr_dot_proto_dot_log__pb2.PushLogsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetFederationOptions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.SimulationIo/GetFederationOptions', + flwr_dot_proto_dot_run__pb2.GetFederationOptionsRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetFederationOptionsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/simulationio_pb2_grpc.pyi b/src/py/flwr/proto/simulationio_pb2_grpc.pyi new file mode 100644 index 000000000000..aa78010b047f --- /dev/null +++ b/src/py/flwr/proto/simulationio_pb2_grpc.pyi @@ -0,0 +1,81 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import abc +import flwr.proto.log_pb2 +import flwr.proto.run_pb2 +import flwr.proto.simulationio_pb2 +import grpc + +class SimulationIoStub: + def __init__(self, channel: grpc.Channel) -> None: ... + PullSimulationInputs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.simulationio_pb2.PullSimulationInputsRequest, + flwr.proto.simulationio_pb2.PullSimulationInputsResponse] + """Pull Simulation inputs""" + + PushSimulationOutputs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.simulationio_pb2.PushSimulationOutputsRequest, + flwr.proto.simulationio_pb2.PushSimulationOutputsResponse] + """Push Simulation outputs""" + + UpdateRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.UpdateRunStatusRequest, + flwr.proto.run_pb2.UpdateRunStatusResponse] + """Update the status of a given run""" + + PushLogs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.log_pb2.PushLogsRequest, + flwr.proto.log_pb2.PushLogsResponse] + """Push ServerApp logs""" + + GetFederationOptions: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.GetFederationOptionsRequest, + flwr.proto.run_pb2.GetFederationOptionsResponse] + """Get Federation Options""" + + +class SimulationIoServicer(metaclass=abc.ABCMeta): + @abc.abstractmethod + def PullSimulationInputs(self, + request: flwr.proto.simulationio_pb2.PullSimulationInputsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.simulationio_pb2.PullSimulationInputsResponse: + """Pull Simulation inputs""" + pass + + @abc.abstractmethod + def PushSimulationOutputs(self, + request: flwr.proto.simulationio_pb2.PushSimulationOutputsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.simulationio_pb2.PushSimulationOutputsResponse: + """Push Simulation outputs""" + pass + + @abc.abstractmethod + def UpdateRunStatus(self, + request: flwr.proto.run_pb2.UpdateRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.UpdateRunStatusResponse: + """Update the status of a given run""" + pass + + @abc.abstractmethod + 
def PushLogs(self, + request: flwr.proto.log_pb2.PushLogsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.log_pb2.PushLogsResponse: + """Push ServerApp logs""" + pass + + @abc.abstractmethod + def GetFederationOptions(self, + request: flwr.proto.run_pb2.GetFederationOptionsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.GetFederationOptionsResponse: + """Get Federation Options""" + pass + + +def add_SimulationIoServicer_to_server(servicer: SimulationIoServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index 58918dbb79ab..508f2dfc9685 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -17,12 +17,13 @@ import argparse import csv import importlib.util +import subprocess import sys import threading from collections.abc import Sequence -from logging import INFO, WARN -from os.path import isfile +from logging import DEBUG, INFO, WARN from pathlib import Path +from time import sleep from typing import Optional import grpc @@ -35,19 +36,26 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address -from flwr.common.config import get_flwr_dir +from flwr.common.args import try_obtain_server_certificates +from flwr.common.config import get_flwr_dir, parse_config_args from flwr.common.constant import ( - DRIVER_API_DEFAULT_ADDRESS, + CLIENT_OCTET, + EXEC_API_DEFAULT_SERVER_ADDRESS, FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, FLEET_API_REST_DEFAULT_ADDRESS, + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, MISSING_EXTRA_REST, + SERVER_OCTET, + SERVERAPPIO_API_DEFAULT_SERVER_ADDRESS, + SIMULATIONIO_API_DEFAULT_SERVER_ADDRESS, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, ) from flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log +from flwr.common.logger import log, warn_deprecated_feature from 
flwr.common.secure_aggregation.crypto.symmetric_encryption import ( private_key_to_bytes, public_key_to_bytes, @@ -56,13 +64,15 @@ add_FleetServicer_to_server, ) from flwr.proto.grpcadapter_pb2_grpc import add_GrpcAdapterServicer_to_server +from flwr.superexec.app import load_executor +from flwr.superexec.exec_grpc import run_exec_api_grpc from .client_manager import ClientManager from .history import History from .server import Server, init_defaults, run_fl from .server_config import ServerConfig from .strategy import Strategy -from .superlink.driver.driver_grpc import run_driver_api_grpc +from .superlink.driver.serverappio_grpc import run_serverappio_api_grpc from .superlink.ffs.ffs_factory import FfsFactory from .superlink.fleet.grpc_adapter.grpc_adapter_servicer import GrpcAdapterServicer from .superlink.fleet.grpc_bidi.grpc_server import ( @@ -71,7 +81,8 @@ ) from .superlink.fleet.grpc_rere.fleet_servicer import FleetServicer from .superlink.fleet.grpc_rere.server_interceptor import AuthenticateServerInterceptor -from .superlink.state import StateFactory +from .superlink.linkstate import LinkStateFactory +from .superlink.simulation.simulationio_grpc import run_simulationio_api_grpc DATABASE = ":flwr-in-memory-state:" BASE_DIR = get_flwr_dir() / "superlink" / "ffs" @@ -89,6 +100,11 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals ) -> History: """Start a Flower server using the gRPC transport layer. + Warning + ------- + This function is deprecated since 1.13.0. Use the :code:`flower-superlink` command + instead to start a SuperLink. + Parameters ---------- server_address : Optional[str] @@ -146,6 +162,17 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals >>> ) >>> ) """ + msg = ( + "flwr.server.start_server() is deprecated." 
+ "\n\tInstead, use the `flower-superlink` CLI command to start a SuperLink " + "as shown below:" + "\n\n\t\t$ flower-superlink --insecure" + "\n\n\tTo view usage and all available options, run:" + "\n\n\t\t$ flower-superlink --help" + "\n\n\tUsing `start_server()` is deprecated." + ) + warn_deprecated_feature(name=msg) + event(EventType.START_SERVER_ENTER) # Parse IP address @@ -198,125 +225,186 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals # pylint: disable=too-many-branches, too-many-locals, too-many-statements def run_superlink() -> None: - """Run Flower SuperLink (Driver API and Fleet API).""" + """Run Flower SuperLink (ServerAppIo API and Fleet API).""" args = _parse_args_run_superlink().parse_args() log(INFO, "Starting Flower SuperLink") event(EventType.RUN_SUPERLINK_ENTER) - # Parse IP address - driver_address, _, _ = _format_address(args.driver_api_address) + # Warn unused options + if args.flwr_dir is not None: + log( + WARN, "The `--flwr-dir` option is currently not in use and will be ignored." 
+ ) + + # Parse IP addresses + serverappio_address, _, _ = _format_address(args.serverappio_api_address) + exec_address, _, _ = _format_address(args.exec_api_address) + simulationio_address, _, _ = _format_address(args.simulationio_api_address) # Obtain certificates - certificates = _try_obtain_certificates(args) + certificates = try_obtain_server_certificates(args, args.fleet_api_type) # Initialize StateFactory - state_factory = StateFactory(args.database) + state_factory = LinkStateFactory(args.database) # Initialize FfsFactory ffs_factory = FfsFactory(args.storage_dir) - # Start Driver API - driver_server: grpc.Server = run_driver_api_grpc( - address=driver_address, + # Start Exec API + executor = load_executor(args) + exec_server: grpc.Server = run_exec_api_grpc( + address=exec_address, state_factory=state_factory, ffs_factory=ffs_factory, + executor=executor, certificates=certificates, + config=parse_config_args( + [args.executor_config] if args.executor_config else args.executor_config + ), ) + grpc_servers = [exec_server] - grpc_servers = [driver_server] + # Determine Exec plugin + # If simulation is used, don't start ServerAppIo and Fleet APIs + sim_exec = executor.__class__.__qualname__ == "SimulationEngine" bckg_threads = [] - if not args.fleet_api_address: - if args.fleet_api_type in [ - TRANSPORT_TYPE_GRPC_RERE, - TRANSPORT_TYPE_GRPC_ADAPTER, - ]: - args.fleet_api_address = FLEET_API_GRPC_RERE_DEFAULT_ADDRESS - elif args.fleet_api_type == TRANSPORT_TYPE_REST: - args.fleet_api_address = FLEET_API_REST_DEFAULT_ADDRESS - - fleet_address, host, port = _format_address(args.fleet_api_address) - num_workers = args.fleet_api_num_workers - if num_workers != 1: - log( - WARN, - "The Fleet API currently supports only 1 worker. " - "You have specified %d workers. " - "Support for multiple workers will be added in future releases. 
" - "Proceeding with a single worker.", - args.fleet_api_num_workers, - ) - num_workers = 1 - - # Start Fleet API - if args.fleet_api_type == TRANSPORT_TYPE_REST: - if ( - importlib.util.find_spec("requests") - and importlib.util.find_spec("starlette") - and importlib.util.find_spec("uvicorn") - ) is None: - sys.exit(MISSING_EXTRA_REST) - - _, ssl_certfile, ssl_keyfile = ( - certificates if certificates is not None else (None, None, None) + if sim_exec: + simulationio_server: grpc.Server = run_simulationio_api_grpc( + address=simulationio_address, + state_factory=state_factory, + ffs_factory=ffs_factory, + certificates=None, # SimulationAppIo API doesn't support SSL yet ) + grpc_servers.append(simulationio_server) - fleet_thread = threading.Thread( - target=_run_fleet_api_rest, - args=( - host, - port, - ssl_keyfile, - ssl_certfile, - state_factory, - ffs_factory, - num_workers, - ), + else: + # Start ServerAppIo API + serverappio_server: grpc.Server = run_serverappio_api_grpc( + address=serverappio_address, + state_factory=state_factory, + ffs_factory=ffs_factory, + certificates=None, # ServerAppIo API doesn't support SSL yet ) - fleet_thread.start() - bckg_threads.append(fleet_thread) - elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: - maybe_keys = _try_setup_node_authentication(args, certificates) - interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None - if maybe_keys is not None: - ( - node_public_keys, - server_private_key, - server_public_key, - ) = maybe_keys - state = state_factory.state() - state.store_node_public_keys(node_public_keys) - state.store_server_private_public_key( - private_key_to_bytes(server_private_key), - public_key_to_bytes(server_public_key), - ) + grpc_servers.append(serverappio_server) + + # Start Fleet API + if not args.fleet_api_address: + if args.fleet_api_type in [ + TRANSPORT_TYPE_GRPC_RERE, + TRANSPORT_TYPE_GRPC_ADAPTER, + ]: + args.fleet_api_address = FLEET_API_GRPC_RERE_DEFAULT_ADDRESS + elif args.fleet_api_type 
== TRANSPORT_TYPE_REST: + args.fleet_api_address = FLEET_API_REST_DEFAULT_ADDRESS + + fleet_address, host, port = _format_address(args.fleet_api_address) + + num_workers = args.fleet_api_num_workers + if num_workers != 1: log( - INFO, - "Node authentication enabled with %d known public keys", - len(node_public_keys), + WARN, + "The Fleet API currently supports only 1 worker. " + "You have specified %d workers. " + "Support for multiple workers will be added in future releases. " + "Proceeding with a single worker.", + args.fleet_api_num_workers, + ) + num_workers = 1 + + if args.fleet_api_type == TRANSPORT_TYPE_REST: + if ( + importlib.util.find_spec("requests") + and importlib.util.find_spec("starlette") + and importlib.util.find_spec("uvicorn") + ) is None: + sys.exit(MISSING_EXTRA_REST) + + _, ssl_certfile, ssl_keyfile = ( + certificates if certificates is not None else (None, None, None) ) - interceptors = [AuthenticateServerInterceptor(state)] - fleet_server = _run_fleet_api_grpc_rere( - address=fleet_address, - state_factory=state_factory, - ffs_factory=ffs_factory, - certificates=certificates, - interceptors=interceptors, + fleet_thread = threading.Thread( + target=_run_fleet_api_rest, + args=( + host, + port, + ssl_keyfile, + ssl_certfile, + state_factory, + ffs_factory, + num_workers, + ), + ) + fleet_thread.start() + bckg_threads.append(fleet_thread) + elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: + maybe_keys = _try_setup_node_authentication(args, certificates) + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None + if maybe_keys is not None: + ( + node_public_keys, + server_private_key, + server_public_key, + ) = maybe_keys + state = state_factory.state() + state.store_node_public_keys(node_public_keys) + state.store_server_private_public_key( + private_key_to_bytes(server_private_key), + public_key_to_bytes(server_public_key), + ) + log( + INFO, + "Node authentication enabled with %d known public keys", + len(node_public_keys), + ) 
+ interceptors = [AuthenticateServerInterceptor(state_factory)] + + fleet_server = _run_fleet_api_grpc_rere( + address=fleet_address, + state_factory=state_factory, + ffs_factory=ffs_factory, + certificates=certificates, + interceptors=interceptors, + ) + grpc_servers.append(fleet_server) + elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_ADAPTER: + fleet_server = _run_fleet_api_grpc_adapter( + address=fleet_address, + state_factory=state_factory, + ffs_factory=ffs_factory, + certificates=certificates, + ) + grpc_servers.append(fleet_server) + else: + raise ValueError(f"Unknown fleet_api_type: {args.fleet_api_type}") + + if args.isolation == ISOLATION_MODE_SUBPROCESS: + + _octet, _colon, _port = serverappio_address.rpartition(":") + io_address = ( + f"{CLIENT_OCTET}:{_port}" if _octet == SERVER_OCTET else serverappio_address ) - grpc_servers.append(fleet_server) - elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_ADAPTER: - fleet_server = _run_fleet_api_grpc_adapter( - address=fleet_address, - state_factory=state_factory, - ffs_factory=ffs_factory, - certificates=certificates, + address_arg = ( + "--simulationio-api-address" if sim_exec else "--serverappio-api-address" ) - grpc_servers.append(fleet_server) - else: - raise ValueError(f"Unknown fleet_api_type: {args.fleet_api_type}") + address = simulationio_address if sim_exec else io_address + cmd = "flwr-simulation" if sim_exec else "flwr-serverapp" + + # Scheduler thread + scheduler_th = threading.Thread( + target=_flwr_scheduler, + args=( + state_factory, + address_arg, + address, + cmd, + ), + ) + scheduler_th.start() + bckg_threads.append(scheduler_th) # Graceful shutdown register_exit_handlers( @@ -331,7 +419,45 @@ def run_superlink() -> None: for thread in bckg_threads: if not thread.is_alive(): sys.exit(1) - driver_server.wait_for_termination(timeout=1) + exec_server.wait_for_termination(timeout=1) + + +def _flwr_scheduler( + state_factory: LinkStateFactory, + io_api_arg: str, + io_api_address: str, + cmd: 
str, +) -> None: + log(DEBUG, "Started %s scheduler thread.", cmd) + + state = state_factory.state() + + # Periodically check for a pending run in the LinkState + while True: + sleep(3) + pending_run_id = state.get_pending_run_id() + + if pending_run_id: + + log( + INFO, + "Launching %s subprocess. Connects to SuperLink on %s", + cmd, + io_api_address, + ) + # Start subprocess + command = [ + cmd, + "--run-once", + io_api_arg, + io_api_address, + "--insecure", + ] + + subprocess.Popen( # pylint: disable=consider-using-with + command, + text=True, + ) def _format_address(address: str) -> tuple[str, str, int]: @@ -433,63 +559,9 @@ def _try_setup_node_authentication( ) -def _try_obtain_certificates( - args: argparse.Namespace, -) -> Optional[tuple[bytes, bytes, bytes]]: - # Obtain certificates - if args.insecure: - log(WARN, "Option `--insecure` was set. Starting insecure HTTP server.") - return None - # Check if certificates are provided - if args.fleet_api_type in [TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_GRPC_ADAPTER]: - if args.ssl_certfile and args.ssl_keyfile and args.ssl_ca_certfile: - if not isfile(args.ssl_ca_certfile): - sys.exit("Path argument `--ssl-ca-certfile` does not point to a file.") - if not isfile(args.ssl_certfile): - sys.exit("Path argument `--ssl-certfile` does not point to a file.") - if not isfile(args.ssl_keyfile): - sys.exit("Path argument `--ssl-keyfile` does not point to a file.") - certificates = ( - Path(args.ssl_ca_certfile).read_bytes(), # CA certificate - Path(args.ssl_certfile).read_bytes(), # server certificate - Path(args.ssl_keyfile).read_bytes(), # server private key - ) - return certificates - if args.ssl_certfile or args.ssl_keyfile or args.ssl_ca_certfile: - sys.exit( - "You need to provide valid file paths to `--ssl-certfile`, " - "`--ssl-keyfile`, and `—-ssl-ca-certfile` to create a secure " - "connection in Fleet API server (gRPC-rere)." 
- ) - if args.fleet_api_type == TRANSPORT_TYPE_REST: - if args.ssl_certfile and args.ssl_keyfile: - if not isfile(args.ssl_certfile): - sys.exit("Path argument `--ssl-certfile` does not point to a file.") - if not isfile(args.ssl_keyfile): - sys.exit("Path argument `--ssl-keyfile` does not point to a file.") - certificates = ( - b"", - Path(args.ssl_certfile).read_bytes(), # server certificate - Path(args.ssl_keyfile).read_bytes(), # server private key - ) - return certificates - if args.ssl_certfile or args.ssl_keyfile: - sys.exit( - "You need to provide valid file paths to `--ssl-certfile` " - "and `--ssl-keyfile` to create a secure connection " - "in Fleet API server (REST, experimental)." - ) - sys.exit( - "Certificates are required unless running in insecure mode. " - "Please provide certificate paths to `--ssl-certfile`, " - "`--ssl-keyfile`, and `—-ssl-ca-certfile` or run the server " - "in insecure mode using '--insecure' if you understand the risks." - ) - - def _run_fleet_api_grpc_rere( address: str, - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, certificates: Optional[tuple[bytes, bytes, bytes]], interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, @@ -517,7 +589,7 @@ def _run_fleet_api_grpc_rere( def _run_fleet_api_grpc_adapter( address: str, - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: @@ -548,11 +620,11 @@ def _run_fleet_api_rest( port: int, ssl_keyfile: Optional[str], ssl_certfile: Optional[str], - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, num_workers: int, ) -> None: - """Run Driver API (REST-based).""" + """Run ServerAppIo API (REST-based).""" try: import uvicorn @@ -579,14 +651,16 @@ def _run_fleet_api_rest( def _parse_args_run_superlink() -> argparse.ArgumentParser: - """Parse command line arguments for both Driver API and Fleet 
API.""" + """Parse command line arguments for both ServerAppIo API and Fleet API.""" parser = argparse.ArgumentParser( description="Start a Flower SuperLink", ) _add_args_common(parser=parser) - _add_args_driver_api(parser=parser) + _add_args_serverappio_api(parser=parser) _add_args_fleet_api(parser=parser) + _add_args_exec_api(parser=parser) + _add_args_simulationio_api(parser=parser) return parser @@ -599,6 +673,17 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: "paths are provided. By default, the server runs with HTTPS enabled. " "Use this flag only if you understand the risks.", ) + parser.add_argument( + "--flwr-dir", + default=None, + help="""The path containing installed Flower Apps. + The default directory is: + + - `$FLWR_HOME/` if `$FLWR_HOME` is defined + - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined + - `$HOME/.flwr/` in all other cases + """, + ) parser.add_argument( "--ssl-certfile", help="Fleet API server SSL certificate file (as a path str) " @@ -618,6 +703,19 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: "to create a secure connection.", type=str, ) + parser.add_argument( + "--isolation", + default=ISOLATION_MODE_SUBPROCESS, + required=False, + choices=[ + ISOLATION_MODE_SUBPROCESS, + ISOLATION_MODE_PROCESS, + ], + help="Isolation mode when running a `ServerApp` (`subprocess` by default, " + "possible values: `subprocess`, `process`). Use `subprocess` to configure " + "SuperLink to run a `ServerApp` in a subprocess. 
Use `process` to indicate " + "that a separate independent process gets created outside of SuperLink.", + ) parser.add_argument( "--database", help="A string representing the path to the database " @@ -650,11 +748,12 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: ) -def _add_args_driver_api(parser: argparse.ArgumentParser) -> None: +def _add_args_serverappio_api(parser: argparse.ArgumentParser) -> None: parser.add_argument( - "--driver-api-address", - help="Driver API (gRPC) server address (IPv4, IPv6, or a domain name).", - default=DRIVER_API_DEFAULT_ADDRESS, + "--serverappio-api-address", + default=SERVERAPPIO_API_DEFAULT_SERVER_ADDRESS, + help="ServerAppIo API (gRPC) server address (IPv4, IPv6, or a domain name). " + f"By default, it is set to {SERVERAPPIO_API_DEFAULT_SERVER_ADDRESS}.", ) @@ -681,3 +780,39 @@ def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: type=int, help="Set the number of concurrent workers for the Fleet API server.", ) + + +def _add_args_exec_api(parser: argparse.ArgumentParser) -> None: + """Add command line arguments for Exec API.""" + parser.add_argument( + "--exec-api-address", + help="Exec API server address (IPv4, IPv6, or a domain name) " + f"By default, it is set to {EXEC_API_DEFAULT_SERVER_ADDRESS}.", + default=EXEC_API_DEFAULT_SERVER_ADDRESS, + ) + parser.add_argument( + "--executor", + help="For example: `deployment:exec` or `project.package.module:wrapper.exec`. " + "The default is `flwr.superexec.deployment:executor`", + default="flwr.superexec.deployment:executor", + ) + parser.add_argument( + "--executor-dir", + help="The directory for the executor.", + default=".", + ) + parser.add_argument( + "--executor-config", + help="Key-value pairs for the executor config, separated by spaces. 
" + "For example:\n\n`--executor-config 'verbose=true " + 'root-certificates="certificates/superlink-ca.crt"\'`', + ) + + +def _add_args_simulationio_api(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--simulationio-api-address", + default=SIMULATIONIO_API_DEFAULT_SERVER_ADDRESS, + help="SimulationIo API (gRPC) server address (IPv4, IPv6, or a domain name)." + f"By default, it is set to {SIMULATIONIO_API_DEFAULT_SERVER_ADDRESS}.", + ) diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index 5a6ee691f3a9..e7176e4515ec 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -24,7 +24,21 @@ class Driver(ABC): - """Abstract base Driver class for the Driver API.""" + """Abstract base Driver class for the ServerAppIo API.""" + + @abstractmethod + def set_run(self, run_id: int) -> None: + """Request a run to the SuperLink with a given `run_id`. + + If a Run with the specified `run_id` exists, a local Run + object will be created. It enables further functionality + in the driver, such as sending `Messages`. + + Parameters + ---------- + run_id : int + The `run_id` of the Run this Driver object operates in. 
+ """ @property @abstractmethod diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py index 13c1c4152dad..05b7ce4be8bc 100644 --- a/src/py/flwr/server/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -17,22 +17,21 @@ import time import warnings from collections.abc import Iterable -from logging import DEBUG, WARNING -from typing import Optional, cast +from logging import DEBUG, INFO, WARN, WARNING +from typing import Any, Optional, cast import grpc from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet -from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS +from flwr.common.constant import MAX_RETRY_DELAY, SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS from flwr.common.grpc import create_channel from flwr.common.logger import log -from flwr.common.serde import ( - message_from_taskres, - message_to_taskins, - user_config_from_proto, -) +from flwr.common.retry_invoker import RetryInvoker, RetryState, exponential +from flwr.common.serde import message_from_taskres, message_to_taskins, run_from_proto from flwr.common.typing import Run -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 +from flwr.proto.serverappio_pb2 import ( # pylint: disable=E0611 GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -40,9 +39,7 @@ PushTaskInsRequest, PushTaskInsResponse, ) -from flwr.proto.driver_pb2_grpc import DriverStub # pylint: disable=E0611 -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 +from flwr.proto.serverappio_pb2_grpc import ServerAppIoStub # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 from .driver import Driver @@ -56,14 +53,12 @@ class GrpcDriver(Driver): - """`GrpcDriver` provides an interface 
to the Driver API. + """`GrpcDriver` provides an interface to the ServerAppIo API. Parameters ---------- - run_id : int - The identifier of the run. - driver_service_address : str (default: "[::]:9091") - The address (URL, IPv6, IPv4) of the SuperLink Driver API service. + serverappio_service_address : str (default: "[::]:9091") + The address (URL, IPv6, IPv4) of the SuperLink ServerAppIo API service. root_certificates : Optional[bytes] (default: None) The PEM-encoded root certificates as a byte string. If provided, a secure connection using the certificates will be @@ -72,25 +67,24 @@ class GrpcDriver(Driver): def __init__( # pylint: disable=too-many-arguments self, - run_id: int, - driver_service_address: str = DRIVER_API_DEFAULT_ADDRESS, + serverappio_service_address: str = SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS, root_certificates: Optional[bytes] = None, ) -> None: - self._run_id = run_id - self._addr = driver_service_address + self._addr = serverappio_service_address self._cert = root_certificates self._run: Optional[Run] = None - self._grpc_stub: Optional[DriverStub] = None + self._grpc_stub: Optional[ServerAppIoStub] = None self._channel: Optional[grpc.Channel] = None self.node = Node(node_id=0, anonymous=True) + self._retry_invoker = _make_simple_grpc_retry_invoker() @property def _is_connected(self) -> bool: - """Check if connected to the Driver API server.""" + """Check if connected to the ServerAppIo API server.""" return self._channel is not None def _connect(self) -> None: - """Connect to the Driver API. + """Connect to the ServerAppIo API. This will not call GetRun. 
""" @@ -102,11 +96,12 @@ def _connect(self) -> None: insecure=(self._cert is None), root_certificates=self._cert, ) - self._grpc_stub = DriverStub(self._channel) + self._grpc_stub = ServerAppIoStub(self._channel) + _wrap_stub(self._grpc_stub, self._retry_invoker) log(DEBUG, "[Driver] Connected to %s", self._addr) def _disconnect(self) -> None: - """Disconnect from the Driver API.""" + """Disconnect from the ServerAppIo API.""" if not self._is_connected: log(DEBUG, "Already disconnected") return @@ -116,41 +111,32 @@ def _disconnect(self) -> None: channel.close() log(DEBUG, "[Driver] Disconnected") - def _init_run(self) -> None: - # Check if is initialized - if self._run is not None: - return + def set_run(self, run_id: int) -> None: + """Set the run.""" # Get the run info - req = GetRunRequest(run_id=self._run_id) + req = GetRunRequest(run_id=run_id) res: GetRunResponse = self._stub.GetRun(req) if not res.HasField("run"): - raise RuntimeError(f"Cannot find the run with ID: {self._run_id}") - self._run = Run( - run_id=res.run.run_id, - fab_id=res.run.fab_id, - fab_version=res.run.fab_version, - fab_hash=res.run.fab_hash, - override_config=user_config_from_proto(res.run.override_config), - ) + raise RuntimeError(f"Cannot find the run with ID: {run_id}") + self._run = run_from_proto(res.run) @property def run(self) -> Run: """Run information.""" - self._init_run() return Run(**vars(self._run)) @property - def _stub(self) -> DriverStub: - """Driver stub.""" + def _stub(self) -> ServerAppIoStub: + """ServerAppIo stub.""" if not self._is_connected: self._connect() - return cast(DriverStub, self._grpc_stub) + return cast(ServerAppIoStub, self._grpc_stub) def _check_message(self, message: Message) -> None: # Check if the message is valid if not ( # Assume self._run being initialized - message.metadata.run_id == self._run_id + message.metadata.run_id == cast(Run, self._run).run_id and message.metadata.src_node_id == self.node.node_id and message.metadata.message_id == "" 
and message.metadata.reply_to_message == "" @@ -171,7 +157,6 @@ def create_message( # pylint: disable=too-many-arguments,R0917 This method constructs a new `Message` with given content and metadata. The `run_id` and `src_node_id` will be set automatically. """ - self._init_run() if ttl: warnings.warn( "A custom TTL was set, but note that the SuperLink does not enforce " @@ -182,7 +167,7 @@ def create_message( # pylint: disable=too-many-arguments,R0917 ttl_ = DEFAULT_TTL if ttl is None else ttl metadata = Metadata( - run_id=self._run_id, + run_id=cast(Run, self._run).run_id, message_id="", # Will be set by the server src_node_id=self.node.node_id, dst_node_id=dst_node_id, @@ -195,10 +180,9 @@ def create_message( # pylint: disable=too-many-arguments,R0917 def get_node_ids(self) -> list[int]: """Get node IDs.""" - self._init_run() # Call GrpcDriverStub method res: GetNodesResponse = self._stub.GetNodes( - GetNodesRequest(run_id=self._run_id) + GetNodesRequest(run_id=cast(Run, self._run).run_id) ) return [node.node_id for node in res.nodes] @@ -208,7 +192,6 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: This method takes an iterable of messages and sends each message to the node specified in `dst_node_id`. """ - self._init_run() # Construct TaskIns task_ins_list: list[TaskIns] = [] for msg in messages: @@ -230,7 +213,6 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: This method is used to collect messages from the SuperLink that correspond to a set of given message IDs. 
""" - self._init_run() # Pull TaskRes res: PullTaskResResponse = self._stub.PullTaskRes( PullTaskResRequest(node=self.node, task_ids=message_ids) @@ -276,3 +258,60 @@ def close(self) -> None: return # Disconnect self._disconnect() + + +def _make_simple_grpc_retry_invoker() -> RetryInvoker: + """Create a simple gRPC retry invoker.""" + + def _on_sucess(retry_state: RetryState) -> None: + if retry_state.tries > 1: + log( + INFO, + "Connection successful after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + + def _on_backoff(retry_state: RetryState) -> None: + if retry_state.tries == 1: + log(WARN, "Connection attempt failed, retrying...") + else: + log( + WARN, + "Connection attempt failed, retrying in %.2f seconds", + retry_state.actual_wait, + ) + + def _on_giveup(retry_state: RetryState) -> None: + if retry_state.tries > 1: + log( + WARN, + "Giving up reconnection after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + + return RetryInvoker( + wait_gen_factory=lambda: exponential(max_delay=MAX_RETRY_DELAY), + recoverable_exceptions=grpc.RpcError, + max_tries=None, + max_time=None, + on_success=_on_sucess, + on_backoff=_on_backoff, + on_giveup=_on_giveup, + should_giveup=lambda e: e.code() != grpc.StatusCode.UNAVAILABLE, # type: ignore + ) + + +def _wrap_stub(stub: ServerAppIoStub, retry_invoker: RetryInvoker) -> None: + """Wrap the gRPC stub with a retry invoker.""" + + def make_lambda(original_method: Any) -> Any: + return lambda *args, **kwargs: retry_invoker.invoke( + original_method, *args, **kwargs + ) + + for method_name in vars(stub): + method = getattr(stub, method_name) + if callable(method): + setattr(stub, method_name, make_lambda(method)) diff --git a/src/py/flwr/server/driver/grpc_driver_test.py b/src/py/flwr/server/driver/grpc_driver_test.py index 20017126927d..59d7407f7a2b 100644 --- a/src/py/flwr/server/driver/grpc_driver_test.py +++ 
b/src/py/flwr/server/driver/grpc_driver_test.py @@ -19,15 +19,21 @@ import unittest from unittest.mock import Mock, patch +import grpc + from flwr.common import DEFAULT_TTL, RecordSet from flwr.common.message import Error from flwr.common.serde import error_to_proto, recordset_to_proto -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + GetRunRequest, + GetRunResponse, + Run, +) +from flwr.proto.serverappio_pb2 import ( # pylint: disable=E0611 GetNodesRequest, PullTaskResRequest, PushTaskInsRequest, ) -from flwr.proto.run_pb2 import Run # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 from .grpc_driver import GrpcDriver @@ -38,21 +44,24 @@ class TestGrpcDriver(unittest.TestCase): def setUp(self) -> None: """Initialize mock GrpcDriverStub and Driver instance before each test.""" - mock_response = Mock( - run=Run( - run_id=61016, - fab_id="mock/mock", - fab_version="v1.0.0", - fab_hash="9f86d08", + + def _mock_fn(req: GetRunRequest) -> GetRunResponse: + return GetRunResponse( + run=Run( + run_id=req.run_id, + fab_id="mock/mock", + fab_version="v1.0.0", + fab_hash="9f86d08", + ) ) - ) + self.mock_stub = Mock() self.mock_channel = Mock() - self.mock_stub.GetRun.return_value = mock_response - mock_response.HasField.return_value = True - self.driver = GrpcDriver(run_id=61016) + self.mock_stub.GetRun.side_effect = _mock_fn + self.driver = GrpcDriver() self.driver._grpc_stub = self.mock_stub # pylint: disable=protected-access self.driver._channel = self.mock_channel # pylint: disable=protected-access + self.driver.set_run(run_id=61016) def test_init_grpc_driver(self) -> None: """Test GrpcDriverStub initialization.""" @@ -208,3 +217,30 @@ def test_del_with_uninitialized_driver(self) -> None: # Assert self.mock_channel.close.assert_not_called() + + def test_simple_retry_mechanism_get_nodes(self) -> None: + """Test retry mechanism with the get_node_ids method.""" 
+ # Prepare + grpc_exc = grpc.RpcError() + grpc_exc.code = lambda: grpc.StatusCode.UNAVAILABLE + mock_get_nodes = Mock() + mock_get_nodes.side_effect = [ + grpc_exc, + Mock(nodes=[Mock(node_id=404)]), + ] + # Make pylint happy + # pylint: disable=protected-access + self.driver._grpc_stub = Mock( + GetNodes=lambda *args, **kwargs: self.driver._retry_invoker.invoke( + mock_get_nodes, *args, **kwargs + ) + ) + # pylint: enable=protected-access + + # Execute + with patch("time.sleep", side_effect=lambda _: None): + node_ids = self.driver.get_node_ids() + + # Assert + self.assertIn(404, node_ids) + self.assertEqual(mock_get_nodes.call_count, 2) diff --git a/src/py/flwr/server/driver/inmemory_driver.py b/src/py/flwr/server/driver/inmemory_driver.py index 130562c6defa..d9189002e5ad 100644 --- a/src/py/flwr/server/driver/inmemory_driver.py +++ b/src/py/flwr/server/driver/inmemory_driver.py @@ -25,18 +25,16 @@ from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.common.typing import Run from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory from .driver import Driver class InMemoryDriver(Driver): - """`InMemoryDriver` class provides an interface to the Driver API. + """`InMemoryDriver` class provides an interface to the ServerAppIo API. Parameters ---------- - run_id : int - The identifier of the run. state_factory : StateFactory A StateFactory embedding a state that this driver can interface with. 
pull_interval : float (default=0.1) @@ -45,18 +43,15 @@ class InMemoryDriver(Driver): def __init__( self, - run_id: int, - state_factory: StateFactory, + state_factory: LinkStateFactory, pull_interval: float = 0.1, ) -> None: - self._run_id = run_id self._run: Optional[Run] = None self.state = state_factory.state() self.pull_interval = pull_interval self.node = Node(node_id=0, anonymous=True) def _check_message(self, message: Message) -> None: - self._init_run() # Check if the message is valid if not ( message.metadata.run_id == cast(Run, self._run).run_id @@ -67,19 +62,16 @@ def _check_message(self, message: Message) -> None: ): raise ValueError(f"Invalid message: {message}") - def _init_run(self) -> None: + def set_run(self, run_id: int) -> None: """Initialize the run.""" - if self._run is not None: - return - run = self.state.get_run(self._run_id) + run = self.state.get_run(run_id) if run is None: - raise RuntimeError(f"Cannot find the run with ID: {self._run_id}") + raise RuntimeError(f"Cannot find the run with ID: {run_id}") self._run = run @property def run(self) -> Run: """Run ID.""" - self._init_run() return Run(**vars(cast(Run, self._run))) def create_message( # pylint: disable=too-many-arguments,R0917 @@ -95,7 +87,6 @@ def create_message( # pylint: disable=too-many-arguments,R0917 This method constructs a new `Message` with given content and metadata. The `run_id` and `src_node_id` will be set automatically. 
""" - self._init_run() if ttl: warnings.warn( "A custom TTL was set, but note that the SuperLink does not enforce " @@ -119,7 +110,6 @@ def create_message( # pylint: disable=too-many-arguments,R0917 def get_node_ids(self) -> list[int]: """Get node IDs.""" - self._init_run() return list(self.state.get_nodes(cast(Run, self._run).run_id)) def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: diff --git a/src/py/flwr/server/driver/inmemory_driver_test.py b/src/py/flwr/server/driver/inmemory_driver_test.py index 9e5aaeaa9ca7..d755454391e6 100644 --- a/src/py/flwr/server/driver/inmemory_driver_test.py +++ b/src/py/flwr/server/driver/inmemory_driver_test.py @@ -21,8 +21,8 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 -from flwr.common import RecordSet -from flwr.common.constant import NODE_ID_NUM_BYTES, PING_MAX_INTERVAL +from flwr.common import ConfigsRecord, RecordSet, now +from flwr.common.constant import NODE_ID_NUM_BYTES, PING_MAX_INTERVAL, Status from flwr.common.message import Error from flwr.common.serde import ( error_to_proto, @@ -30,10 +30,14 @@ message_to_taskres, recordset_to_proto, ) -from flwr.common.typing import Run +from flwr.common.typing import Run, RunStatus from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import InMemoryState, SqliteState, StateFactory -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate import ( + InMemoryLinkState, + LinkStateFactory, + SqliteLinkState, +) +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .inmemory_driver import InMemoryDriver @@ -41,9 +45,8 @@ def push_messages(driver: InMemoryDriver, num_nodes: int) -> tuple[Iterable[str], int]: """Help push messages to state.""" for _ in range(num_nodes): - driver.state.create_node(ping_interval=PING_MAX_INTERVAL) + node_id = driver.state.create_node(ping_interval=PING_MAX_INTERVAL) 
num_messages = 3 - node_id = 1 msgs = [ driver.create_message(RecordSet(), "message_type", node_id, "") for _ in range(num_messages) @@ -91,9 +94,15 @@ def setUp(self) -> None: fab_version="v1.0.0", fab_hash="9f86d08", override_config={"test_key": "test_value"}, + pending_at=now().isoformat(), + starting_at="", + running_at="", + finished_at="", + status=RunStatus(status=Status.PENDING, sub_status="", details=""), ) state_factory = MagicMock(state=lambda: self.state) - self.driver = InMemoryDriver(run_id=61016, state_factory=state_factory) + self.driver = InMemoryDriver(state_factory=state_factory) + self.driver.set_run(run_id=61016) self.driver.state = self.state def test_get_run(self) -> None: @@ -227,12 +236,12 @@ def test_send_and_receive_messages_timeout(self) -> None: def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: """Test tasks are deleted in sqlite state once messages are pulled.""" # Prepare - state = StateFactory("").state() - self.driver = InMemoryDriver( - state.create_run("", "", "", {}), MagicMock(state=lambda: state) - ) + state = LinkStateFactory("").state() + run_id = state.create_run("", "", "", {}, ConfigsRecord()) + self.driver = InMemoryDriver(MagicMock(state=lambda: state)) + self.driver.set_run(run_id=run_id) msg_ids, node_id = push_messages(self.driver, self.num_nodes) - assert isinstance(state, SqliteState) + assert isinstance(state, SqliteLinkState) # Check recorded task_ins = state.query("SELECT * FROM task_ins;") @@ -253,11 +262,13 @@ def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: def test_task_store_consistency_after_push_pull_inmemory_state(self) -> None: """Test tasks are deleted in in-memory state once messages are pulled.""" # Prepare - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") state = state_factory.state() - self.driver = InMemoryDriver(state.create_run("", "", "", {}), state_factory) + run_id = 
state.create_run("", "", "", {}, ConfigsRecord()) + self.driver = InMemoryDriver(state_factory) + self.driver.set_run(run_id=run_id) msg_ids, node_id = push_messages(self.driver, self.num_nodes) - assert isinstance(state, InMemoryState) + assert isinstance(state, InMemoryLinkState) # Check recorded self.assertEqual(len(state.task_ins_store), len(list(msg_ids))) @@ -266,6 +277,6 @@ def test_task_store_consistency_after_push_pull_inmemory_state(self) -> None: reply_tos = get_replies(self.driver, msg_ids, node_id) # Assert - self.assertEqual(reply_tos, msg_ids) + self.assertEqual(set(reply_tos), set(msg_ids)) self.assertEqual(len(state.task_res_store), 0) self.assertEqual(len(state.task_ins_store), 0) diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index 28a66e136639..23d4102e77dd 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -15,44 +15,25 @@ """Run ServerApp.""" -import argparse import sys -from logging import DEBUG, INFO, WARN -from pathlib import Path +from logging import DEBUG, ERROR from typing import Optional -from flwr.cli.config_utils import get_fab_metadata -from flwr.cli.install import install_from_fab -from flwr.common import Context, EventType, RecordSet, event -from flwr.common.config import ( - get_flwr_dir, - get_fused_config_from_dir, - get_metadata_from_config, - get_project_config, - get_project_dir, -) -from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS -from flwr.common.logger import log, update_console_handler, warn_deprecated_feature +from flwr.common import Context +from flwr.common.logger import log, warn_unsupported_feature from flwr.common.object_ref import load_app -from flwr.common.typing import UserConfig -from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 -from flwr.proto.run_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - CreateRunResponse, -) from .driver import Driver -from .driver.grpc_driver 
def run_server_app() -> None:
    """Run Flower server app."""
    # `flower-server-app` has been superseded by `flwr-serverapp`: emit the
    # unsupported-feature notice, record the attempted invocation, and
    # terminate the process immediately.
    deprecation_notice = (
        "The command `flower-server-app` is deprecated and no longer in use. "
        "Use the `flwr-serverapp` exclusively instead."
    )
    warn_unsupported_feature(deprecation_notice)
    log(ERROR, "`flower-server-app` used.")
    sys.exit()
# ============================================================================== -"""Flower server state.""" +"""Flower AppIO service.""" -from .in_memory_state import InMemoryState as InMemoryState -from .sqlite_state import SqliteState as SqliteState -from .state import State as State -from .state_factory import StateFactory as StateFactory +from .app import flwr_serverapp as flwr_serverapp __all__ = [ - "InMemoryState", - "SqliteState", - "State", - "StateFactory", + "flwr_serverapp", ] diff --git a/src/py/flwr/server/serverapp/app.py b/src/py/flwr/server/serverapp/app.py new file mode 100644 index 000000000000..0ed9cc4d0928 --- /dev/null +++ b/src/py/flwr/server/serverapp/app.py @@ -0,0 +1,234 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def flwr_serverapp() -> None:
    """Run process-isolated Flower ServerApp."""
    # Capture stdout/stderr so they can be streamed to the SuperLink.
    log_queue: Queue[Optional[str]] = Queue()
    mirror_output_to_queue(log_queue)

    args = _parse_args_run_flwr_serverapp().parse_args()

    log(INFO, "Starting Flower ServerApp")

    if not args.insecure:
        log(
            ERROR,
            "`flwr-serverapp` does not support TLS yet. "
            "Please use the '--insecure' flag.",
        )
        sys.exit(1)

    log(
        DEBUG,
        "Starting isolated `ServerApp` connected to SuperLink's ServerAppIo API at %s",
        args.serverappio_api_address,
    )
    run_serverapp(
        serverappio_api_address=args.serverappio_api_address,
        log_queue=log_queue,
        run_once=args.run_once,
        flwr_dir=args.flwr_dir,
        certificates=None,
    )

    # Restore stdout/stderr
    restore_output()


def run_serverapp(  # pylint: disable=R0914, disable=W0212
    serverappio_api_address: str,
    log_queue: Queue[Optional[str]],
    run_once: bool,
    flwr_dir: Optional[str] = None,
    certificates: Optional[bytes] = None,
) -> None:
    """Run Flower ServerApp process.

    Polls the SuperLink's ServerAppIo API for pending runs, installs the
    run's FAB, executes the ServerApp, pushes the resulting context back,
    and reports the run status. Loops forever unless ``run_once`` is set.
    """
    driver = GrpcDriver(
        serverappio_service_address=serverappio_api_address,
        root_certificates=certificates,
    )

    # Resolve directory where FABs are installed
    flwr_dir_ = get_flwr_dir(flwr_dir)
    log_uploader = None

    while True:
        # Initialize per-iteration state up front so the `finally` block can
        # safely reference it. Previously `run` was unbound when the pull
        # itself raised, turning the real error into a NameError.
        run = None
        run_status = None

        try:
            # Pull ServerAppInputs from LinkState
            req = PullServerAppInputsRequest()
            res: PullServerAppInputsResponse = driver._stub.PullServerAppInputs(req)
            if not res.HasField("run"):
                # No pending run: back off briefly before polling again.
                sleep(3)
                continue

            context = context_from_proto(res.context)
            run = run_from_proto(res.run)
            fab = fab_from_proto(res.fab)

            driver.set_run(run.run_id)

            # Start log uploader for this run
            log_uploader = start_log_uploader(
                log_queue=log_queue,
                node_id=0,
                run_id=run.run_id,
                stub=driver._stub,
            )

            log(DEBUG, "ServerApp process starts FAB installation.")
            install_from_fab(fab.content, flwr_dir=flwr_dir_, skip_prompt=True)

            fab_id, fab_version = get_fab_metadata(fab.content)

            app_path = str(
                get_project_dir(fab_id, fab_version, fab.hash_str, flwr_dir_)
            )
            config = get_project_config(app_path)

            # Obtain server app reference and the run config
            server_app_attr = config["tool"]["flwr"]["app"]["components"]["serverapp"]
            server_app_run_config = get_fused_config_from_dir(
                Path(app_path), run.override_config
            )

            # Update run_config in context
            context.run_config = server_app_run_config

            log(
                DEBUG,
                "Flower will load ServerApp `%s` in %s",
                server_app_attr,
                app_path,
            )

            # Change status to Running
            run_status_proto = run_status_to_proto(RunStatus(Status.RUNNING, "", ""))
            driver._stub.UpdateRunStatus(
                UpdateRunStatusRequest(run_id=run.run_id, run_status=run_status_proto)
            )

            # Load and run the ServerApp with the Driver
            updated_context = run_(
                driver=driver,
                server_app_dir=app_path,
                server_app_attr=server_app_attr,
                context=context,
            )

            # Send resulting context
            context_proto = context_to_proto(updated_context)
            out_req = PushServerAppOutputsRequest(
                run_id=run.run_id, context=context_proto
            )
            _ = driver._stub.PushServerAppOutputs(out_req)

            run_status = RunStatus(Status.FINISHED, SubStatus.COMPLETED, "")

        except Exception as ex:  # pylint: disable=broad-exception-caught
            exc_entity = "ServerApp"
            log(ERROR, "%s raised an exception", exc_entity, exc_info=ex)
            run_status = RunStatus(Status.FINISHED, SubStatus.FAILED, str(ex))

        finally:
            # Stop log uploader for this run and upload final logs
            if log_uploader:
                stop_log_uploader(log_queue, log_uploader)
                log_uploader = None

            # Update run status — only when a run was actually pulled; if the
            # pull failed there is no run ID to report against.
            if run_status and run:
                run_status_proto = run_status_to_proto(run_status)
                driver._stub.UpdateRunStatus(
                    UpdateRunStatusRequest(
                        run_id=run.run_id, run_status=run_status_proto
                    )
                )

            # Stop the loop if `flwr-serverapp` is expected to process a single run
            if run_once:
                break


def _parse_args_run_flwr_serverapp() -> argparse.ArgumentParser:
    """Parse flwr-serverapp command line arguments."""
    parser = argparse.ArgumentParser(
        description="Run a Flower ServerApp",
    )
    parser.add_argument(
        "--serverappio-api-address",
        default=SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS,
        type=str,
        help="Address of SuperLink's ServerAppIo API (IPv4, IPv6, or a domain name)."
        f"By default, it is set to {SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS}.",
    )
    parser.add_argument(
        "--run-once",
        action="store_true",
        help="When set, this process will start a single ServerApp for a pending Run. "
        "If there is no pending Run, the process will exit.",
    )
    # Common flags shared by all Flower app processes (e.g. --insecure,
    # --flwr-dir) are added by the project-level helper.
    add_args_flwr_app_common(parser=parser)
    return parser
+ eta_norm = ( + self.eta + * np.sqrt(1 - np.power(self.beta_2, server_round + 1.0)) + / (1 - np.power(self.beta_1, server_round + 1.0)) + ) + new_weights = [ - x + self.eta * y / (np.sqrt(z) + self.tau) + x + eta_norm * y / (np.sqrt(z) + self.tau) for x, y, z in zip(self.current_weights, self.m_t, self.v_t) ] diff --git a/src/py/flwr/server/superlink/driver/__init__.py b/src/py/flwr/server/superlink/driver/__init__.py index 58fbc479478f..995c6d8ad004 100644 --- a/src/py/flwr/server/superlink/driver/__init__.py +++ b/src/py/flwr/server/superlink/driver/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower driver service.""" +"""Flower ServerAppIo service.""" diff --git a/src/py/flwr/server/superlink/driver/driver_grpc.py b/src/py/flwr/server/superlink/driver/serverappio_grpc.py similarity index 59% rename from src/py/flwr/server/superlink/driver/driver_grpc.py rename to src/py/flwr/server/superlink/driver/serverappio_grpc.py index 70354387812e..2dcb8dee4c9b 100644 --- a/src/py/flwr/server/superlink/driver/driver_grpc.py +++ b/src/py/flwr/server/superlink/driver/serverappio_grpc.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Driver gRPC API.""" +"""ServerAppIo gRPC API.""" from logging import INFO from typing import Optional @@ -21,37 +21,40 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH from flwr.common.logger import log -from flwr.proto.driver_pb2_grpc import ( # pylint: disable=E0611 - add_DriverServicer_to_server, +from flwr.proto.serverappio_pb2_grpc import ( # pylint: disable=E0611 + add_ServerAppIoServicer_to_server, ) from flwr.server.superlink.ffs.ffs_factory import FfsFactory -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory from ..fleet.grpc_bidi.grpc_server import generic_create_grpc_server -from .driver_servicer import DriverServicer +from .serverappio_servicer import ServerAppIoServicer -def run_driver_api_grpc( +def run_serverappio_api_grpc( address: str, - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: - """Run Driver API (gRPC, request-response).""" - # Create Driver API gRPC server - driver_servicer: grpc.Server = DriverServicer( + """Run ServerAppIo API (gRPC, request-response).""" + # Create ServerAppIo API gRPC server + serverappio_servicer: grpc.Server = ServerAppIoServicer( state_factory=state_factory, ffs_factory=ffs_factory, ) - driver_add_servicer_to_server_fn = add_DriverServicer_to_server - driver_grpc_server = generic_create_grpc_server( - servicer_and_add_fn=(driver_servicer, driver_add_servicer_to_server_fn), + serverappio_add_servicer_to_server_fn = add_ServerAppIoServicer_to_server + serverappio_grpc_server = generic_create_grpc_server( + servicer_and_add_fn=( + serverappio_servicer, + serverappio_add_servicer_to_server_fn, + ), server_address=address, max_message_length=GRPC_MAX_MESSAGE_LENGTH, certificates=certificates, ) - log(INFO, "Flower ECE: Starting Driver API (gRPC-rere) on %s", 
address) - driver_grpc_server.start() + log(INFO, "Flower ECE: Starting ServerAppIo API (gRPC-rere) on %s", address) + serverappio_grpc_server.start() - return driver_grpc_server + return serverappio_grpc_server diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/serverappio_servicer.py similarity index 53% rename from src/py/flwr/server/superlink/driver/driver_servicer.py rename to src/py/flwr/server/superlink/driver/serverappio_servicer.py index 72c0d110ac14..e1820fee0659 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/serverappio_servicer.py @@ -12,62 +12,80 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Driver API servicer.""" +"""ServerAppIo API servicer.""" +import threading import time -from logging import DEBUG +from logging import DEBUG, INFO from typing import Optional from uuid import UUID import grpc +from flwr.common import ConfigsRecord +from flwr.common.constant import Status from flwr.common.logger import log from flwr.common.serde import ( + context_from_proto, + context_to_proto, fab_from_proto, fab_to_proto, + run_status_from_proto, + run_to_proto, user_config_from_proto, - user_config_to_proto, -) -from flwr.common.typing import Fab -from flwr.proto import driver_pb2_grpc # pylint: disable=E0611 -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - GetNodesRequest, - GetNodesResponse, - PullTaskResRequest, - PullTaskResResponse, - PushTaskInsRequest, - PushTaskInsResponse, ) +from flwr.common.typing import Fab, RunStatus +from flwr.proto import serverappio_pb2_grpc # pylint: disable=E0611 from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 +from flwr.proto.log_pb2 import ( # pylint: disable=E0611 + PushLogsRequest, + PushLogsResponse, +) from 
flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, GetRunRequest, GetRunResponse, - Run, + UpdateRunStatusRequest, + UpdateRunStatusResponse, +) +from flwr.proto.serverappio_pb2 import ( # pylint: disable=E0611 + GetNodesRequest, + GetNodesResponse, + PullServerAppInputsRequest, + PullServerAppInputsResponse, + PullTaskResRequest, + PullTaskResResponse, + PushServerAppOutputsRequest, + PushServerAppOutputsResponse, + PushTaskInsRequest, + PushTaskInsResponse, ) from flwr.proto.task_pb2 import TaskRes # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs import Ffs from flwr.server.superlink.ffs.ffs_factory import FfsFactory -from flwr.server.superlink.state import State, StateFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory from flwr.server.utils.validator import validate_task_ins_or_res -class DriverServicer(driver_pb2_grpc.DriverServicer): - """Driver API servicer.""" +class ServerAppIoServicer(serverappio_pb2_grpc.ServerAppIoServicer): + """ServerAppIo API servicer.""" - def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: self.state_factory = state_factory self.ffs_factory = ffs_factory + self.lock = threading.RLock() def GetNodes( self, request: GetNodesRequest, context: grpc.ServicerContext ) -> GetNodesResponse: """Get available nodes.""" - log(DEBUG, "DriverServicer.GetNodes") - state: State = self.state_factory.state() + log(DEBUG, "ServerAppIoServicer.GetNodes") + state: LinkState = self.state_factory.state() all_ids: set[int] = state.get_nodes(request.run_id) nodes: list[Node] = [ Node(node_id=node_id, anonymous=False) for node_id in all_ids @@ -78,8 +96,8 @@ def CreateRun( self, request: CreateRunRequest, context: grpc.ServicerContext ) -> CreateRunResponse: """Create run ID.""" - log(DEBUG, 
"DriverServicer.CreateRun") - state: State = self.state_factory.state() + log(DEBUG, "ServerAppIoServicer.CreateRun") + state: LinkState = self.state_factory.state() if request.HasField("fab"): fab = fab_from_proto(request.fab) ffs: Ffs = self.ffs_factory.ffs() @@ -95,6 +113,7 @@ def CreateRun( request.fab_version, fab_hash, user_config_from_proto(request.override_config), + ConfigsRecord(), ) return CreateRunResponse(run_id=run_id) @@ -102,7 +121,7 @@ def PushTaskIns( self, request: PushTaskInsRequest, context: grpc.ServicerContext ) -> PushTaskInsResponse: """Push a set of TaskIns.""" - log(DEBUG, "DriverServicer.PushTaskIns") + log(DEBUG, "ServerAppIoServicer.PushTaskIns") # Set pushed_at (timestamp in seconds) pushed_at = time.time() @@ -116,7 +135,7 @@ def PushTaskIns( _raise_if(bool(validation_errors), ", ".join(validation_errors)) # Init state - state: State = self.state_factory.state() + state: LinkState = self.state_factory.state() # Store each TaskIns task_ids: list[Optional[UUID]] = [] @@ -132,17 +151,20 @@ def PullTaskRes( self, request: PullTaskResRequest, context: grpc.ServicerContext ) -> PullTaskResResponse: """Pull a set of TaskRes.""" - log(DEBUG, "DriverServicer.PullTaskRes") + log(DEBUG, "ServerAppIoServicer.PullTaskRes") # Convert each task_id str to UUID task_ids: set[UUID] = {UUID(task_id) for task_id in request.task_ids} # Init state - state: State = self.state_factory.state() + state: LinkState = self.state_factory.state() # Register callback def on_rpc_done() -> None: - log(DEBUG, "DriverServicer.PullTaskRes callback: delete TaskIns/TaskRes") + log( + DEBUG, + "ServerAppIoServicer.PullTaskRes callback: delete TaskIns/TaskRes", + ) if context.is_active(): return @@ -164,10 +186,10 @@ def GetRun( self, request: GetRunRequest, context: grpc.ServicerContext ) -> GetRunResponse: """Get run information.""" - log(DEBUG, "DriverServicer.GetRun") + log(DEBUG, "ServerAppIoServicer.GetRun") # Init state - state: State = self.state_factory.state() + 
state: LinkState = self.state_factory.state() # Retrieve run information run = state.get_run(request.run_id) @@ -175,21 +197,13 @@ def GetRun( if run is None: return GetRunResponse() - return GetRunResponse( - run=Run( - run_id=run.run_id, - fab_id=run.fab_id, - fab_version=run.fab_version, - override_config=user_config_to_proto(run.override_config), - fab_hash=run.fab_hash, - ) - ) + return GetRunResponse(run=run_to_proto(run)) def GetFab( self, request: GetFabRequest, context: grpc.ServicerContext ) -> GetFabResponse: """Get FAB from Ffs.""" - log(DEBUG, "DriverServicer.GetFab") + log(DEBUG, "ServerAppIoServicer.GetFab") ffs: Ffs = self.ffs_factory.ffs() if result := ffs.get(request.hash_str): @@ -198,6 +212,78 @@ def GetFab( raise ValueError(f"Found no FAB with hash: {request.hash_str}") + def PullServerAppInputs( + self, request: PullServerAppInputsRequest, context: grpc.ServicerContext + ) -> PullServerAppInputsResponse: + """Pull ServerApp process inputs.""" + log(DEBUG, "ServerAppIoServicer.PullServerAppInputs") + # Init access to LinkState and Ffs + state = self.state_factory.state() + ffs = self.ffs_factory.ffs() + + # Lock access to LinkState, preventing obtaining the same pending run_id + with self.lock: + # Attempt getting the run_id of a pending run + run_id = state.get_pending_run_id() + # If there's no pending run, return an empty response + if run_id is None: + return PullServerAppInputsResponse() + + # Retrieve Context, Run and Fab for the run_id + serverapp_ctxt = state.get_serverapp_context(run_id) + run = state.get_run(run_id) + fab = None + if run and run.fab_hash: + if result := ffs.get(run.fab_hash): + fab = Fab(run.fab_hash, result[0]) + if run and fab and serverapp_ctxt: + # Update run status to STARTING + if state.update_run_status(run_id, RunStatus(Status.STARTING, "", "")): + log(INFO, "Starting run %d", run_id) + return PullServerAppInputsResponse( + context=context_to_proto(serverapp_ctxt), + run=run_to_proto(run), + 
fab=fab_to_proto(fab), + ) + + # Raise an exception if the Run or Fab is not found, + # or if the status cannot be updated to STARTING + raise RuntimeError(f"Failed to start run {run_id}") + + def PushServerAppOutputs( + self, request: PushServerAppOutputsRequest, context: grpc.ServicerContext + ) -> PushServerAppOutputsResponse: + """Push ServerApp process outputs.""" + log(DEBUG, "ServerAppIoServicer.PushServerAppOutputs") + state = self.state_factory.state() + state.set_serverapp_context(request.run_id, context_from_proto(request.context)) + return PushServerAppOutputsResponse() + + def UpdateRunStatus( + self, request: UpdateRunStatusRequest, context: grpc.ServicerContext + ) -> UpdateRunStatusResponse: + """Update the status of a run.""" + log(DEBUG, "ControlServicer.UpdateRunStatus") + state = self.state_factory.state() + + # Update the run status + state.update_run_status( + run_id=request.run_id, new_status=run_status_from_proto(request.run_status) + ) + return UpdateRunStatusResponse() + + def PushLogs( + self, request: PushLogsRequest, context: grpc.ServicerContext + ) -> PushLogsResponse: + """Push logs.""" + log(DEBUG, "ServerAppIoServicer.PushLogs") + state = self.state_factory.state() + + # Add logs to LinkState + merged_logs = "".join(request.logs) + state.add_serverapp_log(request.run_id, merged_logs) + return PushLogsResponse() + def _raise_if(validation_error: bool, detail: str) -> None: if validation_error: diff --git a/src/py/flwr/server/superlink/driver/driver_servicer_test.py b/src/py/flwr/server/superlink/driver/serverappio_servicer_test.py similarity index 93% rename from src/py/flwr/server/superlink/driver/driver_servicer_test.py rename to src/py/flwr/server/superlink/driver/serverappio_servicer_test.py index 394d6be7ee6a..a498bdabc002 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer_test.py +++ b/src/py/flwr/server/superlink/driver/serverappio_servicer_test.py @@ -12,10 +12,10 @@ # See the License for the specific language 
governing permissions and # limitations under the License. # ============================================================================== -"""DriverServicer tests.""" +"""ServerAppIoServicer tests.""" -from flwr.server.superlink.driver.driver_servicer import _raise_if +from flwr.server.superlink.driver.serverappio_servicer import _raise_if # pylint: disable=broad-except diff --git a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py index 75aa6d370511..ffef57d89e8c 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py @@ -48,7 +48,7 @@ from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.message_handler import message_handler -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory T = TypeVar("T", bound=GrpcMessage) @@ -77,7 +77,9 @@ def _handle( class GrpcAdapterServicer(grpcadapter_pb2_grpc.GrpcAdapterServicer): """Fleet API via GrpcAdapter servicer.""" - def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: self.state_factory = state_factory self.ffs_factory = ffs_factory diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index 9d2e13d5b107..c3358fa53211 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -30,7 +30,7 @@ add_FlowerServiceServicer_to_server, ) from flwr.server.client_manager import ClientManager -from flwr.server.superlink.driver.driver_servicer import DriverServicer 
+from flwr.server.superlink.driver.serverappio_servicer import ServerAppIoServicer from flwr.server.superlink.fleet.grpc_adapter.grpc_adapter_servicer import ( GrpcAdapterServicer, ) @@ -161,7 +161,7 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments,R0917 tuple[FleetServicer, AddServicerToServerFn], tuple[GrpcAdapterServicer, AddServicerToServerFn], tuple[FlowerServiceServicer, AddServicerToServerFn], - tuple[DriverServicer, AddServicerToServerFn], + tuple[ServerAppIoServicer, AddServicerToServerFn], ], server_address: str, max_concurrent_workers: int = 1000, diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index 02e34e0bba02..dacbab135057 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -37,13 +37,15 @@ from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.message_handler import message_handler -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory class FleetServicer(fleet_pb2_grpc.FleetServicer): """Fleet API servicer.""" - def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: self.state_factory = state_factory self.ffs_factory = ffs_factory diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py index 855fab353ae6..c07ee0788493 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -45,7 +45,7 @@ ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 
from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 -from flwr.server.superlink.state import State +from flwr.server.superlink.linkstate import LinkStateFactory _PUBLIC_KEY_HEADER = "public-key" _AUTH_TOKEN_HEADER = "auth-token" @@ -84,15 +84,16 @@ def _get_value_from_tuples( class AuthenticateServerInterceptor(grpc.ServerInterceptor): # type: ignore """Server interceptor for node authentication.""" - def __init__(self, state: State): - self.state = state + def __init__(self, state_factory: LinkStateFactory): + self.state_factory = state_factory + state = self.state_factory.state() self.node_public_keys = state.get_node_public_keys() if len(self.node_public_keys) == 0: log(WARNING, "Authentication enabled, but no known public keys configured") - private_key = self.state.get_server_private_key() - public_key = self.state.get_server_public_key() + private_key = state.get_server_private_key() + public_key = state.get_server_public_key() if private_key is None or public_key is None: raise ValueError("Error loading authentication keys") @@ -154,7 +155,7 @@ def _generic_method_handler( context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") # Verify node_id - node_id = self.state.get_node_id(node_public_key_bytes) + node_id = self.state_factory.state().get_node_id(node_public_key_bytes) if not self._verify_node_id(node_id, request): context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") @@ -186,7 +187,7 @@ def _verify_node_id( return False return request.task_res_list[0].task.producer.node_id == node_id if isinstance(request, GetRunRequest): - return node_id in self.state.get_nodes(request.run_id) + return node_id in self.state_factory.state().get_nodes(request.run_id) return request.node.node_id == node_id def _verify_hmac( @@ -210,17 +211,17 @@ def _create_authenticated_node( ), ) ) - - node_id = self.state.get_node_id(public_key_bytes) + state = self.state_factory.state() + node_id = state.get_node_id(public_key_bytes) 
# Handle `CreateNode` here instead of calling the default method handler # Return previously assigned `node_id` for the provided `public_key` if node_id is not None: - self.state.acknowledge_ping(node_id, request.ping_interval) + state.acknowledge_ping(node_id, request.ping_interval) return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) # No `node_id` exists for the provided `public_key` # Handle `CreateNode` here instead of calling the default method handler # Note: the innermost `CreateNode` method will never be called - node_id = self.state.create_node(request.ping_interval, public_key_bytes) + node_id = state.create_node(request.ping_interval, public_key_bytes) return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py index cf7e05f0fb00..ce43fc4bae0a 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py @@ -20,6 +20,7 @@ import grpc +from flwr.common import ConfigsRecord from flwr.common.constant import FLEET_API_GRPC_RERE_DEFAULT_ADDRESS from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( compute_hmac, @@ -45,7 +46,7 @@ from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 from flwr.server.app import _run_fleet_api_grpc_rere from flwr.server.superlink.ffs.ffs_factory import FfsFactory -from flwr.server.superlink.state.state_factory import StateFactory +from flwr.server.superlink.linkstate.linkstate_factory import LinkStateFactory from .server_interceptor import ( _AUTH_TOKEN_HEADER, @@ -62,7 +63,7 @@ def setUp(self) -> None: self._node_private_key, self._node_public_key = generate_key_pairs() self._server_private_key, self._server_public_key = generate_key_pairs() - state_factory = StateFactory(":flwr-in-memory-state:") + 
state_factory = LinkStateFactory(":flwr-in-memory-state:") self.state = state_factory.state() ffs_factory = FfsFactory(".") self.ffs = ffs_factory.ffs() @@ -72,7 +73,7 @@ def setUp(self) -> None: ) self.state.store_node_public_keys({public_key_to_bytes(self._node_public_key)}) - self._server_interceptor = AuthenticateServerInterceptor(self.state) + self._server_interceptor = AuthenticateServerInterceptor(state_factory) self._server: grpc.Server = _run_fleet_api_grpc_rere( FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, state_factory, @@ -334,7 +335,7 @@ def test_successful_get_run_with_metadata(self) -> None: self.state.create_node( ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) - run_id = self.state.create_run("", "", "", {}) + run_id = self.state.create_run("", "", "", {}, ConfigsRecord()) request = GetRunRequest(run_id=run_id) shared_secret = generate_shared_key( self._node_private_key, self._server_public_key @@ -365,7 +366,7 @@ def test_unsuccessful_get_run_with_metadata(self) -> None: self.state.create_node( ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) - run_id = self.state.create_run("", "", "", {}) + run_id = self.state.create_run("", "", "", {}, ConfigsRecord()) request = GetRunRequest(run_id=run_id) node_private_key, _ = generate_key_pairs() shared_secret = generate_shared_key(node_private_key, self._server_public_key) diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 85f3fa34e0ac..38df6f441a20 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -43,12 +43,12 @@ ) from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs import Ffs -from flwr.server.superlink.state import State +from flwr.server.superlink.linkstate import LinkState def 
create_node( request: CreateNodeRequest, # pylint: disable=unused-argument - state: State, + state: LinkState, ) -> CreateNodeResponse: """.""" # Create node @@ -56,7 +56,7 @@ def create_node( return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) -def delete_node(request: DeleteNodeRequest, state: State) -> DeleteNodeResponse: +def delete_node(request: DeleteNodeRequest, state: LinkState) -> DeleteNodeResponse: """.""" # Validate node_id if request.node.anonymous or request.node.node_id == 0: @@ -69,14 +69,14 @@ def delete_node(request: DeleteNodeRequest, state: State) -> DeleteNodeResponse: def ping( request: PingRequest, # pylint: disable=unused-argument - state: State, # pylint: disable=unused-argument + state: LinkState, # pylint: disable=unused-argument ) -> PingResponse: """.""" res = state.acknowledge_ping(request.node.node_id, request.ping_interval) return PingResponse(success=res) -def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsResponse: +def pull_task_ins(request: PullTaskInsRequest, state: LinkState) -> PullTaskInsResponse: """Pull TaskIns handler.""" # Get node_id if client node is not anonymous node = request.node # pylint: disable=no-member @@ -92,7 +92,7 @@ def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsRespo return response -def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResResponse: +def push_task_res(request: PushTaskResRequest, state: LinkState) -> PushTaskResResponse: """Push TaskRes handler.""" # pylint: disable=no-member task_res: TaskRes = request.task_res_list[0] @@ -113,7 +113,7 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo def get_run( - request: GetRunRequest, state: State # pylint: disable=W0613 + request: GetRunRequest, state: LinkState # pylint: disable=W0613 ) -> GetRunResponse: """Get run information.""" run = state.get_run(request.run_id) diff --git 
a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index a988252b3ea2..a684cd9b3bf2 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -19,7 +19,7 @@ import sys from collections.abc import Awaitable -from typing import Callable, TypeVar +from typing import Callable, TypeVar, cast from google.protobuf.message import Message as GrpcMessage @@ -39,8 +39,9 @@ ) from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs import Ffs +from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.message_handler import message_handler -from flwr.server.superlink.state import State +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory try: from starlette.applications import Starlette @@ -90,7 +91,7 @@ async def wrapper(request: Request) -> Response: async def create_node(request: CreateNodeRequest) -> CreateNodeResponse: """Create Node.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = cast(LinkStateFactory, app.state.STATE_FACTORY).state() # Handle message return message_handler.create_node(request=request, state=state) @@ -100,7 +101,7 @@ async def create_node(request: CreateNodeRequest) -> CreateNodeResponse: async def delete_node(request: DeleteNodeRequest) -> DeleteNodeResponse: """Delete Node Id.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = cast(LinkStateFactory, app.state.STATE_FACTORY).state() # Handle message return message_handler.delete_node(request=request, state=state) @@ -110,7 +111,7 @@ async def delete_node(request: DeleteNodeRequest) -> DeleteNodeResponse: async def pull_task_ins(request: PullTaskInsRequest) -> PullTaskInsResponse: """Pull TaskIns.""" # Get state from app - state: State = 
app.state.STATE_FACTORY.state() + state: LinkState = cast(LinkStateFactory, app.state.STATE_FACTORY).state() # Handle message return message_handler.pull_task_ins(request=request, state=state) @@ -121,7 +122,7 @@ async def pull_task_ins(request: PullTaskInsRequest) -> PullTaskInsResponse: async def push_task_res(request: PushTaskResRequest) -> PushTaskResResponse: """Push TaskRes.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = cast(LinkStateFactory, app.state.STATE_FACTORY).state() # Handle message return message_handler.push_task_res(request=request, state=state) @@ -131,7 +132,7 @@ async def push_task_res(request: PushTaskResRequest) -> PushTaskResResponse: async def ping(request: PingRequest) -> PingResponse: """Ping.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = cast(LinkStateFactory, app.state.STATE_FACTORY).state() # Handle message return message_handler.ping(request=request, state=state) @@ -141,7 +142,7 @@ async def ping(request: PingRequest) -> PingResponse: async def get_run(request: GetRunRequest) -> GetRunResponse: """GetRun.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = cast(LinkStateFactory, app.state.STATE_FACTORY).state() # Handle message return message_handler.get_run(request=request, state=state) @@ -151,7 +152,7 @@ async def get_run(request: GetRunRequest) -> GetRunResponse: async def get_fab(request: GetFabRequest) -> GetFabResponse: """GetRun.""" # Get ffs from app - ffs: Ffs = app.state.FFS_FACTORY.state() + ffs: Ffs = cast(FfsFactory, app.state.FFS_FACTORY).ffs() # Handle message return message_handler.get_fab(request=request, ffs=ffs) diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py index 1cbdc230c938..753f450e835c 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py +++ 
b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py @@ -22,7 +22,7 @@ from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import ( DEFAULT_TTL, Config, @@ -104,8 +104,10 @@ def _create_message_and_context() -> tuple[Message, Context, float]: ), ) - # Construct NodeState and retrieve context - node_state = NodeState(node_id=run_id, node_config={PARTITION_ID_KEY: str(0)}) + # Construct DeprecatedRunInfoStore and retrieve context + node_state = DeprecatedRunInfoStore( + node_id=run_id, node_config={PARTITION_ID_KEY: str(0)} + ) node_state.register_context(run_id=run_id) context = node_state.retrieve_context(run_id=run_id) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index 785390534001..7a2d28dec4fb 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -28,7 +28,7 @@ from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.client.clientapp.utils import get_load_client_app_fn -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common.constant import ( NUM_PARTITIONS_KEY, PARTITION_ID_KEY, @@ -40,7 +40,7 @@ from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.common.typing import Run from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import State, StateFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory from .backend import Backend, error_messages_backends, supported_backends @@ -48,7 +48,7 @@ def _register_nodes( - num_nodes: int, state_factory: StateFactory + num_nodes: int, state_factory: LinkStateFactory ) -> NodeToPartitionMapping: 
"""Register nodes with the StateFactory and create node-id:partition-id mapping.""" nodes_mapping: NodeToPartitionMapping = {} @@ -60,16 +60,16 @@ def _register_nodes( return nodes_mapping -def _register_node_states( +def _register_node_info_stores( nodes_mapping: NodeToPartitionMapping, run: Run, app_dir: Optional[str] = None, -) -> dict[int, NodeState]: - """Create NodeState objects and pre-register the context for the run.""" - node_states: dict[int, NodeState] = {} +) -> dict[int, DeprecatedRunInfoStore]: + """Create DeprecatedRunInfoStore objects and register the context for the run.""" + node_info_store: dict[int, DeprecatedRunInfoStore] = {} num_partitions = len(set(nodes_mapping.values())) for node_id, partition_id in nodes_mapping.items(): - node_states[node_id] = NodeState( + node_info_store[node_id] = DeprecatedRunInfoStore( node_id=node_id, node_config={ PARTITION_ID_KEY: partition_id, @@ -78,18 +78,18 @@ def _register_node_states( ) # Pre-register Context objects - node_states[node_id].register_context( + node_info_store[node_id].register_context( run_id=run.run_id, run=run, app_dir=app_dir ) - return node_states + return node_info_store # pylint: disable=too-many-arguments,too-many-locals def worker( taskins_queue: "Queue[TaskIns]", taskres_queue: "Queue[TaskRes]", - node_states: dict[int, NodeState], + node_info_store: dict[int, DeprecatedRunInfoStore], backend: Backend, f_stop: threading.Event, ) -> None: @@ -103,7 +103,7 @@ def worker( node_id = task_ins.task.consumer.node_id # Retrieve context - context = node_states[node_id].retrieve_context(run_id=task_ins.run_id) + context = node_info_store[node_id].retrieve_context(run_id=task_ins.run_id) # Convert TaskIns to Message message = message_from_taskins(task_ins) @@ -112,7 +112,7 @@ def worker( out_mssg, updated_context = backend.process_message(message, context) # Update Context - node_states[node_id].update_context( + node_info_store[node_id].update_context( task_ins.run_id, 
context=updated_context ) except Empty: @@ -145,7 +145,7 @@ def worker( def add_taskins_to_queue( - state: State, + state: LinkState, queue: "Queue[TaskIns]", nodes_mapping: NodeToPartitionMapping, f_stop: threading.Event, @@ -160,7 +160,7 @@ def add_taskins_to_queue( def put_taskres_into_state( - state: State, queue: "Queue[TaskRes]", f_stop: threading.Event + state: LinkState, queue: "Queue[TaskRes]", f_stop: threading.Event ) -> None: """Put TaskRes into State from a queue.""" while not f_stop.is_set(): @@ -177,8 +177,8 @@ def run_api( app_fn: Callable[[], ClientApp], backend_fn: Callable[[], Backend], nodes_mapping: NodeToPartitionMapping, - state_factory: StateFactory, - node_states: dict[int, NodeState], + state_factory: LinkStateFactory, + node_info_stores: dict[int, DeprecatedRunInfoStore], f_stop: threading.Event, ) -> None: """Run the VCE.""" @@ -223,7 +223,7 @@ def run_api( worker, taskins_queue, taskres_queue, - node_states, + node_info_stores, backend, f_stop, ) @@ -264,7 +264,7 @@ def start_vce( client_app: Optional[ClientApp] = None, client_app_attr: Optional[str] = None, num_supernodes: Optional[int] = None, - state_factory: Optional[StateFactory] = None, + state_factory: Optional[LinkStateFactory] = None, existing_nodes_mapping: Optional[NodeToPartitionMapping] = None, ) -> None: """Start Fleet API with the Simulation Engine.""" @@ -303,7 +303,7 @@ def start_vce( if not state_factory: log(INFO, "A StateFactory was not supplied to the SimulationEngine.") # Create an empty in-memory state factory - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") log(INFO, "Created new %s.", state_factory.__class__.__name__) if num_supernodes: @@ -312,8 +312,8 @@ def start_vce( num_nodes=num_supernodes, state_factory=state_factory ) - # Construct mapping of NodeStates - node_states = _register_node_states( + # Construct mapping of DeprecatedRunInfoStore + node_info_stores = _register_node_info_stores( 
nodes_mapping=nodes_mapping, run=run, app_dir=app_dir if is_app else None ) @@ -376,7 +376,7 @@ def _load_client_app() -> ClientApp: backend_fn, nodes_mapping, state_factory, - node_states, + node_info_stores, f_stop, ) except LoadClientAppError as loadapp_ex: diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index bc34b825c333..9faf9f341af6 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -39,16 +39,19 @@ Metadata, RecordSet, Scalar, + now, ) +from flwr.common.constant import Status from flwr.common.recordset_compat import getpropertiesins_to_recordset from flwr.common.serde import message_from_taskres, message_to_taskins -from flwr.common.typing import Run +from flwr.common.typing import Run, RunStatus from flwr.server.superlink.fleet.vce.vce_api import ( NodeToPartitionMapping, _register_nodes, start_vce, ) -from flwr.server.superlink.state import InMemoryState, StateFactory +from flwr.server.superlink.linkstate import InMemoryLinkState, LinkStateFactory +from flwr.server.superlink.linkstate.in_memory_linkstate import RunRecord class DummyClient(NumPyClient): @@ -86,11 +89,11 @@ def terminate_simulation(f_stop: threading.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, -) -> tuple[StateFactory, NodeToPartitionMapping, dict[UUID, float]]: +) -> tuple[LinkStateFactory, NodeToPartitionMapping, dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the state.""" # Register a state and a run_id in it run_id = 1234 - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") # Register a few nodes nodes_mapping = _register_nodes(num_nodes=num_nodes, state_factory=state_factory) @@ -106,19 +109,30 @@ def init_state_factory_nodes_mapping( # pylint: 
disable=too-many-locals def register_messages_into_state( - state_factory: StateFactory, + state_factory: LinkStateFactory, nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, ) -> dict[UUID, float]: """Register `num_messages` into the state factory.""" - state: InMemoryState = state_factory.state() # type: ignore - state.run_ids[run_id] = Run( - run_id=run_id, - fab_id="Mock/mock", - fab_version="v1.0.0", - fab_hash="hash", - override_config={}, + state: InMemoryLinkState = state_factory.state() # type: ignore + state.run_ids[run_id] = RunRecord( + Run( + run_id=run_id, + fab_id="Mock/mock", + fab_version="v1.0.0", + fab_hash="hash", + override_config={}, + pending_at=now().isoformat(), + starting_at="", + running_at="", + finished_at="", + status=RunStatus( + status=Status.PENDING, + sub_status="", + details="", + ), + ), ) # Artificially add TaskIns to state so they can be processed # by the Simulation Engine logic @@ -176,7 +190,7 @@ def start_and_shutdown( client_app_attr: Optional[str] = None, app_dir: str = "", num_supernodes: Optional[int] = None, - state_factory: Optional[StateFactory] = None, + state_factory: Optional[LinkStateFactory] = None, nodes_mapping: Optional[NodeToPartitionMapping] = None, duration: int = 0, backend_config: str = "{}", @@ -201,7 +215,7 @@ def start_and_shutdown( if not app_dir: app_dir = _autoresolve_app_dir() - run = Run(run_id=1234, fab_id="", fab_version="", fab_hash="", override_config={}) + run = Run.create_empty(run_id=1234) start_vce( num_supernodes=num_supernodes, diff --git a/src/py/flwr/server/superlink/linkstate/__init__.py b/src/py/flwr/server/superlink/linkstate/__init__.py new file mode 100644 index 000000000000..471cfbd2b5ec --- /dev/null +++ b/src/py/flwr/server/superlink/linkstate/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower LinkState.""" + + +from .in_memory_linkstate import InMemoryLinkState as InMemoryLinkState +from .linkstate import LinkState as LinkState +from .linkstate_factory import LinkStateFactory as LinkStateFactory +from .sqlite_linkstate import SqliteLinkState as SqliteLinkState + +__all__ = [ + "InMemoryLinkState", + "LinkState", + "LinkStateFactory", + "SqliteLinkState", +] diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/linkstate/in_memory_linkstate.py similarity index 56% rename from src/py/flwr/server/superlink/state/in_memory_state.py rename to src/py/flwr/server/superlink/linkstate/in_memory_linkstate.py index a9c4176ee5f2..c2273a36a5db 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/linkstate/in_memory_linkstate.py @@ -12,31 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""In-memory State implementation.""" +"""In-memory LinkState implementation.""" import threading import time +from bisect import bisect_right +from dataclasses import dataclass, field from logging import ERROR, WARNING from typing import Optional from uuid import UUID, uuid4 -from flwr.common import log, now +from flwr.common import Context, log, now from flwr.common.constant import ( MESSAGE_TTL_TOLERANCE, NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES, + Status, ) -from flwr.common.typing import Run, UserConfig +from flwr.common.record import ConfigsRecord +from flwr.common.typing import Run, RunStatus, UserConfig from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state.state import State +from flwr.server.superlink.linkstate.linkstate import LinkState from flwr.server.utils import validate_task_ins_or_res -from .utils import generate_rand_int_from_bytes, make_node_unavailable_taskres +from .utils import ( + generate_rand_int_from_bytes, + has_valid_sub_status, + is_valid_transition, + verify_found_taskres, + verify_taskins_ids, +) + + +@dataclass +class RunRecord: # pylint: disable=R0902 + """The record of a specific run, including its status and timestamps.""" + run: Run + logs: list[tuple[float, str]] = field(default_factory=list) + log_lock: threading.Lock = field(default_factory=threading.Lock) -class InMemoryState(State): # pylint: disable=R0902,R0904 - """In-memory State implementation.""" + +class InMemoryLinkState(LinkState): # pylint: disable=R0902,R0904 + """In-memory LinkState implementation.""" def __init__(self) -> None: @@ -44,16 +63,19 @@ def __init__(self) -> None: self.node_ids: dict[int, tuple[float, float]] = {} self.public_key_to_node_id: dict[bytes, int] = {} - # Map run_id to (fab_id, fab_version) - self.run_ids: dict[int, Run] = {} + # Map run_id to RunRecord + self.run_ids: dict[int, RunRecord] = {} + self.contexts: 
dict[int, Context] = {} + self.federation_options: dict[int, ConfigsRecord] = {} self.task_ins_store: dict[UUID, TaskIns] = {} self.task_res_store: dict[UUID, TaskRes] = {} + self.task_ins_id_to_task_res_id: dict[UUID, UUID] = {} self.node_public_keys: set[bytes] = set() self.server_public_key: Optional[bytes] = None self.server_private_key: Optional[bytes] = None - self.lock = threading.Lock() + self.lock = threading.RLock() def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: """Store one TaskIns.""" @@ -64,8 +86,25 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: return None # Validate run_id if task_ins.run_id not in self.run_ids: - log(ERROR, "`run_id` is invalid") + log(ERROR, "Invalid run ID for TaskIns: %s", task_ins.run_id) + return None + # Validate source node ID + if task_ins.task.producer.node_id != 0: + log( + ERROR, + "Invalid source node ID for TaskIns: %s", + task_ins.task.producer.node_id, + ) return None + # Validate destination node ID + if not task_ins.task.consumer.anonymous: + if task_ins.task.consumer.node_id not in self.node_ids: + log( + ERROR, + "Invalid destination node ID for TaskIns: %s", + task_ins.task.consumer.node_id, + ) + return None # Create task_id task_id = uuid4() @@ -186,57 +225,50 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: task_res.task_id = str(task_id) with self.lock: self.task_res_store[task_id] = task_res + self.task_ins_id_to_task_res_id[UUID(task_ins_id)] = task_id # Return the new task_id return task_id def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: - """Get all TaskRes that have not been delivered yet.""" + """Get TaskRes for the given TaskIns IDs.""" + ret: dict[UUID, TaskRes] = {} + with self.lock: - # Find TaskRes that were not delivered yet - task_res_list: list[TaskRes] = [] - replied_task_ids: set[UUID] = set() - for _, task_res in self.task_res_store.items(): - reply_to = UUID(task_res.task.ancestry[0]) - - # Check if corresponding TaskIns 
exists and is not expired - task_ins = self.task_ins_store.get(reply_to) - if task_ins is None: - log(WARNING, "TaskIns with task_id %s does not exist.", reply_to) - task_ids.remove(reply_to) - continue - - if task_ins.task.created_at + task_ins.task.ttl <= time.time(): - log(WARNING, "TaskIns with task_id %s is expired.", reply_to) - task_ids.remove(reply_to) - continue - - if reply_to in task_ids and task_res.task.delivered_at == "": - task_res_list.append(task_res) - replied_task_ids.add(reply_to) - - # Check if the node is offline - for task_id in task_ids - replied_task_ids: - task_ins = self.task_ins_store.get(task_id) - if task_ins is None: - continue - node_id = task_ins.task.consumer.node_id - online_until, _ = self.node_ids[node_id] - # Generate a TaskRes containing an error reply if the node is offline. - if online_until < time.time(): - err_taskres = make_node_unavailable_taskres( - ref_taskins=task_ins, - ) - self.task_res_store[UUID(err_taskres.task_id)] = err_taskres - task_res_list.append(err_taskres) - - # Mark all of them as delivered + current = time.time() + + # Verify TaskIns IDs + ret = verify_taskins_ids( + inquired_taskins_ids=task_ids, + found_taskins_dict=self.task_ins_store, + current_time=current, + ) + + # Find all TaskRes + task_res_found: list[TaskRes] = [] + for task_id in task_ids: + # If TaskRes exists and is not delivered, add it to the list + if task_res_id := self.task_ins_id_to_task_res_id.get(task_id): + task_res = self.task_res_store[task_res_id] + if task_res.task.delivered_at == "": + task_res_found.append(task_res) + tmp_ret_dict = verify_found_taskres( + inquired_taskins_ids=task_ids, + found_taskins_dict=self.task_ins_store, + found_taskres_list=task_res_found, + current_time=current, + ) + ret.update(tmp_ret_dict) + + # Mark existing TaskRes to be returned as delivered delivered_at = now().isoformat() - for task_res in task_res_list: + for task_res in task_res_found: task_res.task.delivered_at = delivered_at - # Return 
TaskRes - return task_res_list + # Cleanup + self._force_delete_tasks_by_ids(set(ret.keys())) + + return list(ret.values()) def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @@ -257,9 +289,25 @@ def delete_tasks(self, task_ids: set[UUID]) -> None: for task_id in task_ins_to_be_deleted: del self.task_ins_store[task_id] + del self.task_ins_id_to_task_res_id[task_id] for task_id in task_res_to_be_deleted: del self.task_res_store[task_id] + def _force_delete_tasks_by_ids(self, task_ids: set[UUID]) -> None: + """Delete tasks based on a set of TaskIns IDs.""" + if not task_ids: + return + + with self.lock: + for task_id in task_ids: + # Delete TaskIns + if task_id in self.task_ins_store: + del self.task_ins_store[task_id] + # Delete TaskRes + if task_id in self.task_ins_id_to_task_res_id: + task_res_id = self.task_ins_id_to_task_res_id.pop(task_id) + del self.task_res_store[task_res_id] + def num_task_ins(self) -> int: """Calculate the number of task_ins in store. 
@@ -277,7 +325,7 @@ def num_task_res(self) -> int: def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: - """Create, store in state, and return `node_id`.""" + """Create, store in the link state, and return `node_id`.""" # Sample a random int64 as node_id node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) @@ -338,12 +386,14 @@ def get_node_id(self, node_public_key: bytes) -> Optional[int]: """Retrieve stored `node_id` filtered by `node_public_keys`.""" return self.public_key_to_node_id.get(node_public_key) + # pylint: disable=too-many-arguments,too-many-positional-arguments def create_run( self, fab_id: Optional[str], fab_version: Optional[str], fab_hash: Optional[str], override_config: UserConfig, + federation_options: ConfigsRecord, ) -> int: """Create a new run for the specified `fab_hash`.""" # Sample a random int64 as run_id @@ -351,13 +401,28 @@ def create_run( run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) if run_id not in self.run_ids: - self.run_ids[run_id] = Run( - run_id=run_id, - fab_id=fab_id if fab_id else "", - fab_version=fab_version if fab_version else "", - fab_hash=fab_hash if fab_hash else "", - override_config=override_config, + run_record = RunRecord( + run=Run( + run_id=run_id, + fab_id=fab_id if fab_id else "", + fab_version=fab_version if fab_version else "", + fab_hash=fab_hash if fab_hash else "", + override_config=override_config, + pending_at=now().isoformat(), + starting_at="", + running_at="", + finished_at="", + status=RunStatus( + status=Status.PENDING, + sub_status="", + details="", + ), + ), ) + self.run_ids[run_id] = run_record + + # Record federation options. 
Leave empty if not passed + self.federation_options[run_id] = federation_options return run_id log(ERROR, "Unexpected run creation failure.") return 0 @@ -365,7 +430,7 @@ def create_run( def store_server_private_public_key( self, private_key: bytes, public_key: bytes ) -> None: - """Store `server_private_key` and `server_public_key` in state.""" + """Store `server_private_key` and `server_public_key` in the link state.""" with self.lock: if self.server_private_key is None and self.server_public_key is None: self.server_private_key = private_key @@ -382,12 +447,12 @@ def get_server_public_key(self) -> Optional[bytes]: return self.server_public_key def store_node_public_keys(self, public_keys: set[bytes]) -> None: - """Store a set of `node_public_keys` in state.""" + """Store a set of `node_public_keys` in the link state.""" with self.lock: self.node_public_keys = public_keys def store_node_public_key(self, public_key: bytes) -> None: - """Store a `node_public_key` in state.""" + """Store a `node_public_key` in the link state.""" with self.lock: self.node_public_keys.add(public_key) @@ -395,13 +460,88 @@ def get_node_public_keys(self) -> set[bytes]: """Retrieve all currently stored `node_public_keys` as a set.""" return self.node_public_keys + def get_run_ids(self) -> set[int]: + """Retrieve all run IDs.""" + with self.lock: + return set(self.run_ids.keys()) + def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" with self.lock: if run_id not in self.run_ids: log(ERROR, "`run_id` is invalid") return None - return self.run_ids[run_id] + return self.run_ids[run_id].run + + def get_run_status(self, run_ids: set[int]) -> dict[int, RunStatus]: + """Retrieve the statuses for the specified runs.""" + with self.lock: + return { + run_id: self.run_ids[run_id].run.status + for run_id in set(run_ids) + if run_id in self.run_ids + } + + def update_run_status(self, run_id: int, new_status: RunStatus) -> bool: + 
"""Update the status of the run with the specified `run_id`.""" + with self.lock: + # Check if the run_id exists + if run_id not in self.run_ids: + log(ERROR, "`run_id` is invalid") + return False + + # Check if the status transition is valid + current_status = self.run_ids[run_id].run.status + if not is_valid_transition(current_status, new_status): + log( + ERROR, + 'Invalid status transition: from "%s" to "%s"', + current_status.status, + new_status.status, + ) + return False + + # Check if the sub-status is valid + if not has_valid_sub_status(current_status): + log( + ERROR, + 'Invalid sub-status "%s" for status "%s"', + current_status.sub_status, + current_status.status, + ) + return False + + # Update the status + run_record = self.run_ids[run_id] + if new_status.status == Status.STARTING: + run_record.run.starting_at = now().isoformat() + elif new_status.status == Status.RUNNING: + run_record.run.running_at = now().isoformat() + elif new_status.status == Status.FINISHED: + run_record.run.finished_at = now().isoformat() + run_record.run.status = new_status + return True + + def get_pending_run_id(self) -> Optional[int]: + """Get the `run_id` of a run with `Status.PENDING` status, if any.""" + pending_run_id = None + + # Loop through all registered runs + for run_id, run_rec in self.run_ids.items(): + # Break once a pending run is found + if run_rec.run.status.status == Status.PENDING: + pending_run_id = run_id + break + + return pending_run_id + + def get_federation_options(self, run_id: int) -> Optional[ConfigsRecord]: + """Retrieve the federation options for the specified `run_id`.""" + with self.lock: + if run_id not in self.run_ids: + log(ERROR, "`run_id` is invalid") + return None + return self.federation_options[run_id] def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: """Acknowledge a ping received from a node, serving as a heartbeat.""" @@ -410,3 +550,36 @@ def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: 
self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) return True return False + + def get_serverapp_context(self, run_id: int) -> Optional[Context]: + """Get the context for the specified `run_id`.""" + return self.contexts.get(run_id) + + def set_serverapp_context(self, run_id: int, context: Context) -> None: + """Set the context for the specified `run_id`.""" + if run_id not in self.run_ids: + raise ValueError(f"Run {run_id} not found") + self.contexts[run_id] = context + + def add_serverapp_log(self, run_id: int, log_message: str) -> None: + """Add a log entry to the serverapp logs for the specified `run_id`.""" + if run_id not in self.run_ids: + raise ValueError(f"Run {run_id} not found") + run = self.run_ids[run_id] + with run.log_lock: + run.logs.append((now().timestamp(), log_message)) + + def get_serverapp_log( + self, run_id: int, after_timestamp: Optional[float] + ) -> tuple[str, float]: + """Get the serverapp logs for the specified `run_id`.""" + if run_id not in self.run_ids: + raise ValueError(f"Run {run_id} not found") + run = self.run_ids[run_id] + if after_timestamp is None: + after_timestamp = 0.0 + with run.log_lock: + # Find the index where the timestamp would be inserted + index = bisect_right(run.logs, (after_timestamp, "")) + latest_timestamp = run.logs[-1][0] if index < len(run.logs) else 0.0 + return "".join(log for _, log in run.logs[index:]), latest_timestamp diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/linkstate/linkstate.py similarity index 52% rename from src/py/flwr/server/superlink/state/state.py rename to src/py/flwr/server/superlink/linkstate/linkstate.py index b220aad3ebcc..05fb3c2f0cc6 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/linkstate/linkstate.py @@ -12,28 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Abstract base class State.""" +"""Abstract base class LinkState.""" import abc from typing import Optional from uuid import UUID -from flwr.common.typing import Run, UserConfig +from flwr.common import Context +from flwr.common.record import ConfigsRecord +from flwr.common.typing import Run, RunStatus, UserConfig from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -class State(abc.ABC): # pylint: disable=R0904 - """Abstract State.""" +class LinkState(abc.ABC): # pylint: disable=R0904 + """Abstract LinkState.""" @abc.abstractmethod def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: """Store one TaskIns. - Usually, the Driver API calls this to schedule instructions. + Usually, the ServerAppIo API calls this to schedule instructions. - Stores the value of the `task_ins` in the state and, if successful, returns the - `task_id` (UUID) of the `task_ins`. If, for any reason, + Stores the value of the `task_ins` in the link state and, if successful, + returns the `task_id` (UUID) of the `task_ins`. If, for any reason, storing the `task_ins` fails, `None` is returned. Constraints @@ -99,13 +101,27 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: @abc.abstractmethod def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: - """Get TaskRes for task_ids. + """Get TaskRes for the given TaskIns IDs. - Usually, the Driver API calls this method to get results for instructions it has - previously scheduled. + This method is typically called by the ServerAppIo API to obtain + results (TaskRes) for previously scheduled instructions (TaskIns). + For each task_id provided, this method returns one of the following responses: - Retrieves all TaskRes for the given `task_ids` and returns and empty list of - none could be found. + - An error TaskRes if the corresponding TaskIns does not exist or has expired. 
+ - An error TaskRes if the corresponding TaskRes exists but has expired. + - The valid TaskRes if the TaskIns has a corresponding valid TaskRes. + - Nothing if the TaskIns is still valid and waiting for a TaskRes. + + Parameters + ---------- + task_ids : set[UUID] + A set of TaskIns IDs for which to retrieve results (TaskRes). + + Returns + ------- + list[TaskRes] + A list of TaskRes corresponding to the given task IDs. If no + TaskRes could be found for any of the task IDs, an empty list is returned. """ @abc.abstractmethod @@ -130,11 +146,11 @@ def delete_tasks(self, task_ids: set[UUID]) -> None: def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: - """Create, store in state, and return `node_id`.""" + """Create, store in the link state, and return `node_id`.""" @abc.abstractmethod def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: - """Remove `node_id` from state.""" + """Remove `node_id` from the link state.""" @abc.abstractmethod def get_nodes(self, run_id: int) -> set[int]: @@ -151,15 +167,20 @@ def get_node_id(self, node_public_key: bytes) -> Optional[int]: """Retrieve stored `node_id` filtered by `node_public_keys`.""" @abc.abstractmethod - def create_run( + def create_run( # pylint: disable=too-many-arguments,too-many-positional-arguments self, fab_id: Optional[str], fab_version: Optional[str], fab_hash: Optional[str], override_config: UserConfig, + federation_options: ConfigsRecord, ) -> int: """Create a new run for the specified `fab_hash`.""" + @abc.abstractmethod + def get_run_ids(self) -> set[int]: + """Retrieve all run IDs.""" + @abc.abstractmethod def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`. 
@@ -172,17 +193,77 @@ def get_run(self, run_id: int) -> Optional[Run]: Returns ------- Optional[Run] - A dataclass instance containing three elements if `run_id` is valid: - - `run_id`: The identifier of the run, same as the specified `run_id`. - - `fab_id`: The identifier of the FAB used in the specified run. - - `fab_version`: The version of the FAB used in the specified run. + The `Run` instance if found; otherwise, `None`. + """ + + @abc.abstractmethod + def get_run_status(self, run_ids: set[int]) -> dict[int, RunStatus]: + """Retrieve the statuses for the specified runs. + + Parameters + ---------- + run_ids : set[int] + A set of run identifiers for which to retrieve statuses. + + Returns + ------- + dict[int, RunStatus] + A dictionary mapping each valid run ID to its corresponding status. + + Notes + ----- + Only valid run IDs that exist in the State will be included in the returned + dictionary. If a run ID is not found, it will be omitted from the result. + """ + + @abc.abstractmethod + def update_run_status(self, run_id: int, new_status: RunStatus) -> bool: + """Update the status of the run with the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run. + new_status : RunStatus + The new status to be assigned to the run. + + Returns + ------- + bool + True if the status update is successful; False otherwise. + """ + + @abc.abstractmethod + def get_pending_run_id(self) -> Optional[int]: + """Get the `run_id` of a run with `Status.PENDING` status. + + Returns + ------- + Optional[int] + The `run_id` of a `Run` that is pending to be started; None if + there is no Run pending. + """ + + @abc.abstractmethod + def get_federation_options(self, run_id: int) -> Optional[ConfigsRecord]: + """Retrieve the federation options for the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run. 
+ + Returns + ------- + Optional[ConfigsRecord] + The federation options for the run if it exists; None otherwise. """ @abc.abstractmethod def store_server_private_public_key( self, private_key: bytes, public_key: bytes ) -> None: - """Store `server_private_key` and `server_public_key` in state.""" + """Store `server_private_key` and `server_public_key` in the link state.""" @abc.abstractmethod def get_server_private_key(self) -> Optional[bytes]: @@ -194,11 +275,11 @@ def get_server_public_key(self) -> Optional[bytes]: @abc.abstractmethod def store_node_public_keys(self, public_keys: set[bytes]) -> None: - """Store a set of `node_public_keys` in state.""" + """Store a set of `node_public_keys` in the link state.""" @abc.abstractmethod def store_node_public_key(self, public_key: bytes) -> None: - """Store a `node_public_key` in state.""" + """Store a `node_public_key` in the link state.""" @abc.abstractmethod def get_node_public_keys(self) -> set[bytes]: @@ -222,3 +303,66 @@ def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: is_acknowledged : bool True if the ping is successfully acknowledged; otherwise, False. """ + + @abc.abstractmethod + def get_serverapp_context(self, run_id: int) -> Optional[Context]: + """Get the context for the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run for which to retrieve the context. + + Returns + ------- + Optional[Context] + The context associated with the specified `run_id`, or `None` if no context + exists for the given `run_id`. + """ + + @abc.abstractmethod + def set_serverapp_context(self, run_id: int, context: Context) -> None: + """Set the context for the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run for which to set the context. + context : Context + The context to be associated with the specified `run_id`. 
+ """ + + @abc.abstractmethod + def add_serverapp_log(self, run_id: int, log_message: str) -> None: + """Add a log entry to the ServerApp logs for the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run for which to add a log entry. + log_message : str + The log entry to be added to the ServerApp logs. + """ + + @abc.abstractmethod + def get_serverapp_log( + self, run_id: int, after_timestamp: Optional[float] + ) -> tuple[str, float]: + """Get the ServerApp logs for the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run for which to retrieve the ServerApp logs. + + after_timestamp : Optional[float] + Retrieve logs after this timestamp. If set to `None`, retrieve all logs. + + Returns + ------- + tuple[str, float] + A tuple containing: + - The ServerApp logs associated with the specified `run_id`. + - The timestamp of the latest log entry in the returned logs. + Returns `0` if no logs are returned. + """ diff --git a/src/py/flwr/server/superlink/state/state_factory.py b/src/py/flwr/server/superlink/linkstate/linkstate_factory.py similarity index 80% rename from src/py/flwr/server/superlink/state/state_factory.py rename to src/py/flwr/server/superlink/linkstate/linkstate_factory.py index 96c8d445c16e..403b9bf5b4cc 100644 --- a/src/py/flwr/server/superlink/state/state_factory.py +++ b/src/py/flwr/server/superlink/linkstate/linkstate_factory.py @@ -20,13 +20,13 @@ from flwr.common.logger import log -from .in_memory_state import InMemoryState -from .sqlite_state import SqliteState -from .state import State +from .in_memory_linkstate import InMemoryLinkState +from .linkstate import LinkState +from .sqlite_linkstate import SqliteLinkState -class StateFactory: - """Factory class that creates State instances. +class LinkStateFactory: + """Factory class that creates LinkState instances. 
Parameters ---------- @@ -39,19 +39,19 @@ class StateFactory: def __init__(self, database: str) -> None: self.database = database - self.state_instance: Optional[State] = None + self.state_instance: Optional[LinkState] = None - def state(self) -> State: + def state(self) -> LinkState: """Return a State instance and create it, if necessary.""" # InMemoryState if self.database == ":flwr-in-memory-state:": if self.state_instance is None: - self.state_instance = InMemoryState() + self.state_instance = InMemoryLinkState() log(DEBUG, "Using InMemoryState") return self.state_instance # SqliteState - state = SqliteState(self.database) + state = SqliteLinkState(self.database) state.initialize() log(DEBUG, "Using SqliteState") return state diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/linkstate/linkstate_test.py similarity index 58% rename from src/py/flwr/server/superlink/state/state_test.py rename to src/py/flwr/server/superlink/linkstate/linkstate_test.py index a4663f80f630..202fdf387277 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/linkstate/linkstate_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Tests all state implemenations have to conform to.""" +"""Tests all LinkState implemenations have to conform to.""" # pylint: disable=invalid-name, too-many-lines, R0904, R0913 import tempfile @@ -23,17 +23,28 @@ from unittest.mock import patch from uuid import UUID -from flwr.common import DEFAULT_TTL -from flwr.common.constant import ErrorCode +from parameterized import parameterized + +from flwr.common import DEFAULT_TTL, ConfigsRecord, Context, RecordSet, now +from flwr.common.constant import Status, SubStatus from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( generate_key_pairs, private_key_to_bytes, public_key_to_bytes, ) -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import InMemoryState, SqliteState, State +from flwr.common.typing import RunStatus + +# pylint: disable=E0611 +from flwr.proto.node_pb2 import Node +from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes + +# pylint: enable=E0611 +from flwr.server.superlink.linkstate import ( + InMemoryLinkState, + LinkState, + SqliteLinkState, +) class StateTest(unittest.TestCase): @@ -43,15 +54,17 @@ class StateTest(unittest.TestCase): __test__ = False @abstractmethod - def state_factory(self) -> State: + def state_factory(self) -> LinkState: """Provide state implementation to test.""" raise NotImplementedError() def test_create_and_get_run(self) -> None: """Test if create_run and get_run work correctly.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {"test_key": "test_value"}) + state: LinkState = self.state_factory() + run_id = state.create_run( + None, None, "9f86d08", {"test_key": 
"test_value"}, ConfigsRecord() + ) # Execute run = state.get_run(run_id) @@ -62,6 +75,153 @@ def test_create_and_get_run(self) -> None: assert run.fab_hash == "9f86d08" assert run.override_config["test_key"] == "test_value" + def test_get_all_run_ids(self) -> None: + """Test if get_run_ids works correctly.""" + # Prepare + state = self.state_factory() + run_id1 = state.create_run( + None, None, "9f86d08", {"test_key": "test_value"}, ConfigsRecord() + ) + run_id2 = state.create_run( + None, None, "fffffff", {"mock_key": "mock_value"}, ConfigsRecord() + ) + + # Execute + run_ids = state.get_run_ids() + + # Assert + assert run_id1 in run_ids + assert run_id2 in run_ids + + def test_get_all_run_ids_empty(self) -> None: + """Test if get_run_ids works correctly when no runs are present.""" + # Prepare + state = self.state_factory() + + # Execute + run_ids = state.get_run_ids() + + # Assert + assert len(run_ids) == 0 + + def test_get_pending_run_id(self) -> None: + """Test if get_pending_run_id works correctly.""" + # Prepare + state = self.state_factory() + _ = state.create_run( + None, None, "9f86d08", {"test_key": "test_value"}, ConfigsRecord() + ) + run_id2 = state.create_run( + None, None, "fffffff", {"mock_key": "mock_value"}, ConfigsRecord() + ) + state.update_run_status(run_id2, RunStatus(Status.STARTING, "", "")) + + # Execute + pending_run_id = state.get_pending_run_id() + assert pending_run_id is not None + run_status_dict = state.get_run_status({pending_run_id}) + assert run_status_dict[pending_run_id].status == Status.PENDING + + # Change state + state.update_run_status(pending_run_id, RunStatus(Status.STARTING, "", "")) + # Attempt get pending run + pending_run_id = state.get_pending_run_id() + assert pending_run_id is None + + def test_get_and_update_run_status(self) -> None: + """Test if get_run_status and update_run_status work correctly.""" + # Prepare + state = self.state_factory() + run_id1 = state.create_run( + None, None, "9f86d08", {"test_key": 
"test_value"}, ConfigsRecord() + ) + run_id2 = state.create_run( + None, None, "fffffff", {"mock_key": "mock_value"}, ConfigsRecord() + ) + state.update_run_status(run_id2, RunStatus(Status.STARTING, "", "")) + state.update_run_status(run_id2, RunStatus(Status.RUNNING, "", "")) + + # Execute + run_status_dict = state.get_run_status({run_id1, run_id2}) + status1 = run_status_dict[run_id1] + status2 = run_status_dict[run_id2] + + # Assert + assert status1.status == Status.PENDING + assert status2.status == Status.RUNNING + + @parameterized.expand([(0,), (1,), (2,)]) # type: ignore + def test_status_transition_valid( + self, num_transitions_before_finishing: int + ) -> None: + """Test valid run status transactions.""" + # Prepare + state = self.state_factory() + run_id = state.create_run( + None, None, "9f86d08", {"test_key": "test_value"}, ConfigsRecord() + ) + + # Execute and assert + status = state.get_run_status({run_id})[run_id] + assert status.status == Status.PENDING + + if num_transitions_before_finishing > 0: + assert state.update_run_status(run_id, RunStatus(Status.STARTING, "", "")) + status = state.get_run_status({run_id})[run_id] + assert status.status == Status.STARTING + + if num_transitions_before_finishing > 1: + assert state.update_run_status(run_id, RunStatus(Status.RUNNING, "", "")) + status = state.get_run_status({run_id})[run_id] + assert status.status == Status.RUNNING + + assert state.update_run_status( + run_id, RunStatus(Status.FINISHED, SubStatus.FAILED, "mock failure") + ) + + status = state.get_run_status({run_id})[run_id] + assert status.status == Status.FINISHED + + def test_status_transition_invalid(self) -> None: + """Test invalid run status transitions.""" + # Prepare + state = self.state_factory() + run_id = state.create_run( + None, None, "9f86d08", {"test_key": "test_value"}, ConfigsRecord() + ) + run_statuses = [ + RunStatus(Status.PENDING, "", ""), + RunStatus(Status.STARTING, "", ""), + RunStatus(Status.PENDING, "", ""), + 
RunStatus(Status.FINISHED, SubStatus.COMPLETED, ""), + ] + + # Execute and assert + # Cannot transition from RunStatus.PENDING to RunStatus.PENDING, + # RunStatus.RUNNING, or RunStatus.FINISHED with COMPLETED substatus + for run_status in [s for s in run_statuses if s.status != Status.STARTING]: + assert not state.update_run_status(run_id, run_status) + state.update_run_status(run_id, RunStatus(Status.STARTING, "", "")) + # Cannot transition from RunStatus.STARTING to RunStatus.PENDING, + # RunStatus.STARTING, or RunStatus.FINISHED with COMPLETED substatus + for run_status in [s for s in run_statuses if s.status != Status.RUNNING]: + assert not state.update_run_status(run_id, run_status) + state.update_run_status(run_id, RunStatus(Status.RUNNING, "", "")) + # Cannot transition from RunStatus.RUNNING + # to RunStatus.PENDING, RunStatus.STARTING, or RunStatus.RUNNING + for run_status in [s for s in run_statuses if s.status != Status.FINISHED]: + assert not state.update_run_status(run_id, run_status) + state.update_run_status( + run_id, RunStatus(Status.FINISHED, SubStatus.COMPLETED, "") + ) + # Cannot transition to any status from RunStatus.FINISHED + run_statuses += [ + RunStatus(Status.FINISHED, SubStatus.FAILED, ""), + RunStatus(Status.FINISHED, SubStatus.STOPPED, ""), + ] + for run_status in run_statuses: + assert not state.update_run_status(run_id, run_status) + def test_get_task_ins_empty(self) -> None: """Validate that a new state has no TaskIns.""" # Prepare @@ -87,11 +247,11 @@ def test_get_task_res_empty(self) -> None: def test_store_task_ins_one(self) -> None: """Test store_task_ins.""" # Prepare - consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, 
run_id=run_id ) assert task_ins.task.created_at < time.time() # pylint: disable=no-member @@ -99,7 +259,7 @@ def test_store_task_ins_one(self) -> None: # Execute state.store_task_ins(task_ins=task_ins) - task_ins_list = state.get_task_ins(node_id=consumer_node_id, limit=10) + task_ins_list = state.get_task_ins(node_id=node_id, limit=10) # Assert assert len(task_ins_list) == 1 @@ -119,20 +279,39 @@ def test_store_task_ins_one(self) -> None: ) assert actual_task.ttl > 0 + def test_store_task_ins_invalid_node_id(self) -> None: + """Test store_task_ins with invalid node_id.""" + # Prepare + state = self.state_factory() + node_id = state.create_node(1e3) + invalid_node_id = 61016 if node_id != 61016 else 61017 + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + task_ins = create_task_ins( + consumer_node_id=invalid_node_id, anonymous=False, run_id=run_id + ) + task_ins2 = create_task_ins( + consumer_node_id=node_id, anonymous=False, run_id=run_id + ) + task_ins2.task.producer.node_id = 61016 + + # Execute and assert + assert state.store_task_ins(task_ins) is None + assert state.store_task_ins(task_ins2) is None + def test_store_and_delete_tasks(self) -> None: """Test delete_tasks.""" # Prepare - consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins_0 = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) task_ins_1 = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) task_ins_2 = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) # Insert three TaskIns @@ -145,11 +324,11 @@ def 
test_store_and_delete_tasks(self) -> None: assert task_id_2 # Get TaskIns to mark them delivered - _ = state.get_task_ins(node_id=consumer_node_id, limit=None) + _ = state.get_task_ins(node_id=node_id, limit=None) # Insert one TaskRes and retrive it to mark it as delivered task_res_0 = create_task_res( - producer_node_id=consumer_node_id, + producer_node_id=node_id, anonymous=False, ancestry=[str(task_id_0)], run_id=run_id, @@ -160,7 +339,7 @@ def test_store_and_delete_tasks(self) -> None: # Insert one TaskRes, but don't retrive it task_res_1: TaskRes = create_task_res( - producer_node_id=consumer_node_id, + producer_node_id=node_id, anonymous=False, ancestry=[str(task_id_1)], run_id=run_id, @@ -171,13 +350,6 @@ def test_store_and_delete_tasks(self) -> None: # - State has three TaskIns, all of them delivered # - State has two TaskRes, one of the delivered, the other not - assert state.num_task_ins() == 3 - assert state.num_task_res() == 2 - - # Execute - state.delete_tasks(task_ids={task_id_0, task_id_1, task_id_2}) - - # Assert assert state.num_task_ins() == 2 assert state.num_task_res() == 1 @@ -188,7 +360,7 @@ def test_init_state(self) -> None: state = self.state_factory() # Assert - assert isinstance(state, State) + assert isinstance(state, LinkState) # TaskIns tests def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: @@ -197,8 +369,8 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: Create anonymous task and retrieve it. 
""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -212,8 +384,8 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -226,9 +398,12 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) - task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) + state: LinkState = self.state_factory() + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + task_ins = create_task_ins( + consumer_node_id=node_id, anonymous=False, run_id=run_id + ) # Execute _ = state.store_task_ins(task_ins) @@ -240,13 +415,16 @@ def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) - task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) + state: LinkState = self.state_factory() + 
node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + task_ins = create_task_ins( + consumer_node_id=node_id, anonymous=False, run_id=run_id + ) # Execute task_ins_uuid = state.store_task_ins(task_ins) - task_ins_list = state.get_task_ins(node_id=1, limit=None) + task_ins_list = state.get_task_ins(node_id=node_id, limit=None) # Assert assert len(task_ins_list) == 1 @@ -257,15 +435,18 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) - task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) + state: LinkState = self.state_factory() + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + task_ins = create_task_ins( + consumer_node_id=node_id, anonymous=False, run_id=run_id + ) # Execute _ = state.store_task_ins(task_ins) # 1st get: set to delivered - task_ins_list = state.get_task_ins(node_id=1, limit=None) + task_ins_list = state.get_task_ins(node_id=node_id, limit=None) assert len(task_ins_list) == 1 @@ -278,7 +459,7 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: def test_get_task_ins_limit_throws_for_limit_zero(self) -> None: """Fail call with limit=0.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() # Execute & Assert with self.assertRaises(AssertionError): @@ -287,7 +468,7 @@ def test_get_task_ins_limit_throws_for_limit_zero(self) -> None: def test_task_ins_store_invalid_run_id_and_fail(self) -> None: """Store TaskIns with invalid run_id and fail.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=61016) # Execute @@ -300,8 
+481,8 @@ def test_task_ins_store_invalid_run_id_and_fail(self) -> None: def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_ins_id = state.store_task_ins(task_ins) @@ -326,8 +507,8 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) # Execute retrieved_node_ids = state.get_nodes(run_id) @@ -338,8 +519,8 @@ def test_node_ids_initial_state(self) -> None: def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_ids = [] # Execute @@ -354,9 +535,9 @@ def test_create_node_and_get_nodes(self) -> None: def test_create_node_public_key(self) -> None: """Test creating a client node with public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) # Execute node_id = state.create_node(ping_interval=10, public_key=public_key) @@ -370,9 +551,9 @@ def test_create_node_public_key(self) -> None: def test_create_node_public_key_twice(self) -> None: """Test 
creating a client node with same public key twice.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_id = state.create_node(ping_interval=10, public_key=public_key) # Execute @@ -386,15 +567,15 @@ def test_create_node_public_key_twice(self) -> None: assert retrieved_node_id == node_id # Assert node_ids and public_key_to_node_id are synced - if isinstance(state, InMemoryState): + if isinstance(state, InMemoryLinkState): assert len(state.node_ids) == 1 assert len(state.public_key_to_node_id) == 1 def test_delete_node(self) -> None: """Test deleting a client node.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_id = state.create_node(ping_interval=10) # Execute @@ -407,9 +588,9 @@ def test_delete_node(self) -> None: def test_delete_node_public_key(self) -> None: """Test deleting a client node with public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_id = state.create_node(ping_interval=10, public_key=public_key) # Execute @@ -424,9 +605,9 @@ def test_delete_node_public_key(self) -> None: def test_delete_node_public_key_none(self) -> None: """Test deleting a client node with public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_id = 0 # Execute & Assert @@ -442,10 +623,10 @@ def 
test_delete_node_public_key_none(self) -> None: def test_delete_node_wrong_public_key(self) -> None: """Test deleting a client node with wrong public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" wrong_public_key = b"mock_mock" - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_id = state.create_node(ping_interval=10, public_key=public_key) # Execute & Assert @@ -461,10 +642,10 @@ def test_delete_node_wrong_public_key(self) -> None: def test_get_node_id_wrong_public_key(self) -> None: """Test retrieving a client node with wrong public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" wrong_public_key = b"mock_mock" - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) # Execute state.create_node(ping_interval=10, public_key=public_key) @@ -478,8 +659,8 @@ def test_get_node_id_wrong_public_key(self) -> None: def test_get_nodes_invalid_run_id(self) -> None: """Test retrieving all node_ids with invalid run_id.""" # Prepare - state: State = self.state_factory() - state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) invalid_run_id = 61016 state.create_node(ping_interval=10) @@ -492,8 +673,8 @@ def test_get_nodes_invalid_run_id(self) -> None: def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_1 = create_task_ins(consumer_node_id=0, 
anonymous=True, run_id=run_id) @@ -510,8 +691,8 @@ def test_num_task_ins(self) -> None: def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_ins_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -544,7 +725,7 @@ def test_num_task_res(self) -> None: def test_server_private_public_key(self) -> None: """Test get server private and public key after inserting.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() private_key, public_key = generate_key_pairs() private_key_bytes = private_key_to_bytes(private_key) public_key_bytes = public_key_to_bytes(public_key) @@ -561,7 +742,7 @@ def test_server_private_public_key(self) -> None: def test_server_private_public_key_none(self) -> None: """Test get server private and public key without inserting.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() # Execute server_private_key = state.get_server_private_key() @@ -574,7 +755,7 @@ def test_server_private_public_key_none(self) -> None: def test_store_server_private_public_key_twice(self) -> None: """Test inserting private and public key twice.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() private_key, public_key = generate_key_pairs() private_key_bytes = private_key_to_bytes(private_key) public_key_bytes = public_key_to_bytes(public_key) @@ -594,7 +775,7 @@ def test_store_server_private_public_key_twice(self) -> None: def test_node_public_keys(self) -> None: """Test store_node_public_keys and get_node_public_keys from state.""" # Prepare - state: State = self.state_factory() 
+ state: LinkState = self.state_factory() key_pairs = [generate_key_pairs() for _ in range(3)] public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} @@ -608,7 +789,7 @@ def test_node_public_keys(self) -> None: def test_node_public_key(self) -> None: """Test store_node_public_key and get_node_public_keys from state.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() key_pairs = [generate_key_pairs() for _ in range(3)] public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} @@ -623,8 +804,8 @@ def test_node_public_key(self) -> None: def test_acknowledge_ping(self) -> None: """Test if acknowledge_ping works and if get_nodes return online nodes.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) node_ids = [state.create_node(ping_interval=10) for _ in range(100)] for node_id in node_ids[:70]: state.acknowledge_ping(node_id, ping_interval=30) @@ -639,53 +820,11 @@ def test_acknowledge_ping(self) -> None: # Assert self.assertSetEqual(actual_node_ids, set(node_ids[70:])) - def test_node_unavailable_error(self) -> None: - """Test if get_task_res return TaskRes containing node unavailable error.""" - # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) - node_id_0 = state.create_node(ping_interval=90) - node_id_1 = state.create_node(ping_interval=30) - # Create and store TaskIns - task_ins_0 = create_task_ins( - consumer_node_id=node_id_0, anonymous=False, run_id=run_id - ) - task_ins_1 = create_task_ins( - consumer_node_id=node_id_1, anonymous=False, run_id=run_id - ) - task_id_0 = state.store_task_ins(task_ins=task_ins_0) - task_id_1 = state.store_task_ins(task_ins=task_ins_1) - assert task_id_0 is not None and task_id_1 is not None - - # Get TaskIns to mark them delivered - 
state.get_task_ins(node_id=node_id_0, limit=None) - - # Create and store TaskRes - task_res_0 = create_task_res( - producer_node_id=node_id_0, - anonymous=False, - ancestry=[str(task_id_0)], - run_id=run_id, - ) - state.store_task_res(task_res_0) - - # Execute - current_time = time.time() - task_res_list: list[TaskRes] = [] - with patch("time.time", side_effect=lambda: current_time + 50): - task_res_list = state.get_task_res({task_id_0, task_id_1}) - - # Assert - assert len(task_res_list) == 2 - err_taskres = task_res_list[1] - assert err_taskres.task.HasField("error") - assert err_taskres.task.error.code == ErrorCode.NODE_UNAVAILABLE - def test_store_task_res_task_ins_expired(self) -> None: """Test behavior of store_task_res when the TaskIns it references is expired.""" # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_ins.task.created_at = time.time() - task_ins.task.ttl + 0.5 @@ -738,8 +877,8 @@ def test_store_task_res_limit_ttl(self) -> None: ) in test_cases: # Prepare - state: State = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins( consumer_node_id=0, anonymous=True, run_id=run_id @@ -769,11 +908,11 @@ def test_store_task_res_limit_ttl(self) -> None: def test_get_task_ins_not_return_expired(self) -> None: """Test get_task_ins not to return expired tasks.""" # Prepare - consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins( - consumer_node_id=consumer_node_id, 
anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) task_ins.task.created_at = time.time() - 5 task_ins.task.ttl = 5.0 @@ -786,14 +925,14 @@ def test_get_task_ins_not_return_expired(self) -> None: task_ins_list = state.get_task_ins(node_id=1, limit=None) assert len(task_ins_list) == 0 - def test_get_task_res_not_return_expired(self) -> None: - """Test get_task_res not to return TaskRes if its TaskIns is expired.""" + def test_get_task_res_expired_task_ins(self) -> None: + """Test get_task_res to return error TaskRes if its TaskIns has expired.""" # Prepare - consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) task_ins.task.created_at = time.time() - 5 task_ins.task.ttl = 5.1 @@ -815,14 +954,16 @@ def test_get_task_res_not_return_expired(self) -> None: task_res_list = state.get_task_res(task_ids={task_id}) # Assert - assert len(task_res_list) == 0 + assert len(task_res_list) == 1 + assert task_res_list[0].task.HasField("error") + assert state.num_task_ins() == state.num_task_res() == 0 def test_get_task_res_returns_empty_for_missing_taskins(self) -> None: """Test that get_task_res returns an empty result when the corresponding TaskIns does not exist.""" # Prepare state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins_id = "5b0a3fc2-edba-4525-a89a-04b83420b7c8" task_res = create_task_res( @@ -837,17 +978,19 @@ def test_get_task_res_returns_empty_for_missing_taskins(self) -> None: task_res_list = state.get_task_res(task_ids={UUID(task_ins_id)}) # Assert - assert len(task_res_list) == 0 + assert 
len(task_res_list) == 1 + assert task_res_list[0].task.HasField("error") + assert state.num_task_ins() == state.num_task_res() == 0 def test_get_task_res_return_if_not_expired(self) -> None: """Test get_task_res to return TaskRes if its TaskIns exists and is not expired.""" # Prepare - consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) task_ins.task.created_at = time.time() - 5 task_ins.task.ttl = 7.1 @@ -855,7 +998,7 @@ def test_get_task_res_return_if_not_expired(self) -> None: task_id = state.store_task_ins(task_ins=task_ins) task_res = create_task_res( - producer_node_id=1, + producer_node_id=node_id, anonymous=False, ancestry=[str(task_id)], run_id=run_id, @@ -875,17 +1018,18 @@ def test_store_task_res_fail_if_consumer_producer_id_mismatch(self) -> None: """Test store_task_res to fail if there is a mismatch between the consumer_node_id of taskIns and the producer_node_id of taskRes.""" # Prepare - consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run(None, None, "9f86d08", {}) + node_id = state.create_node(1e3) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) task_ins = create_task_ins( - consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + consumer_node_id=node_id, anonymous=False, run_id=run_id ) task_id = state.store_task_ins(task_ins=task_ins) task_res = create_task_res( - producer_node_id=100, # different than consumer_node_id + # Different than consumer_node_id + producer_node_id=100 if node_id != 100 else 101, anonymous=False, ancestry=[str(task_id)], run_id=run_id, @@ -897,6 +1041,150 @@ def test_store_task_res_fail_if_consumer_producer_id_mismatch(self) -> None: # Assert assert 
task_res_uuid is None + def test_get_set_serverapp_context(self) -> None: + """Test get and set serverapp context.""" + # Prepare + state: LinkState = self.state_factory() + context = Context( + run_id=1, + node_id=0, + node_config={"mock": "mock"}, + state=RecordSet(), + run_config={"test": "test"}, + ) + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + + # Execute + init = state.get_serverapp_context(run_id) + state.set_serverapp_context(run_id, context) + retrieved_context = state.get_serverapp_context(run_id) + + # Assert + assert init is None + assert retrieved_context == context + + def test_set_context_invalid_run_id(self) -> None: + """Test set_serverapp_context with invalid run_id.""" + # Prepare + state: LinkState = self.state_factory() + context = Context( + run_id=1, + node_id=0, + node_config={"mock": "mock"}, + state=RecordSet(), + run_config={"test": "test"}, + ) + + # Execute and assert + with self.assertRaises(ValueError): + state.set_serverapp_context(61016, context) # Invalid run_id + + def test_add_serverapp_log_invalid_run_id(self) -> None: + """Test adding serverapp log with invalid run_id.""" + # Prepare + state: LinkState = self.state_factory() + invalid_run_id = 99999 + log_entry = "Invalid log entry" + + # Execute and assert + with self.assertRaises(ValueError): + state.add_serverapp_log(invalid_run_id, log_entry) + + def test_get_serverapp_log_invalid_run_id(self) -> None: + """Test retrieving serverapp log with invalid run_id.""" + # Prepare + state: LinkState = self.state_factory() + invalid_run_id = 99999 + + # Execute and assert + with self.assertRaises(ValueError): + state.get_serverapp_log(invalid_run_id, after_timestamp=None) + + def test_add_and_get_serverapp_log(self) -> None: + """Test adding and retrieving serverapp logs.""" + # Prepare + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + log_entry_1 = "Log entry 1" + log_entry_2 = "Log entry 
2" + timestamp = now().timestamp() + + # Execute + state.add_serverapp_log(run_id, log_entry_1) + state.add_serverapp_log(run_id, log_entry_2) + retrieved_logs, latest = state.get_serverapp_log( + run_id, after_timestamp=timestamp + ) + + # Assert + assert latest > timestamp + assert log_entry_1 + log_entry_2 == retrieved_logs + + def test_get_serverapp_log_after_timestamp(self) -> None: + """Test retrieving serverapp logs after a specific timestamp.""" + # Prepare + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + log_entry_1 = "Log entry 1" + log_entry_2 = "Log entry 2" + state.add_serverapp_log(run_id, log_entry_1) + timestamp = now().timestamp() + state.add_serverapp_log(run_id, log_entry_2) + + # Execute + retrieved_logs, latest = state.get_serverapp_log( + run_id, after_timestamp=timestamp + ) + + # Assert + assert latest > timestamp + assert log_entry_1 not in retrieved_logs + assert log_entry_2 == retrieved_logs + + def test_get_serverapp_log_after_timestamp_no_logs(self) -> None: + """Test retrieving serverapp logs after a specific timestamp but no logs are + found.""" + # Prepare + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}, ConfigsRecord()) + log_entry = "Log entry" + state.add_serverapp_log(run_id, log_entry) + timestamp = now().timestamp() + + # Execute + retrieved_logs, latest = state.get_serverapp_log( + run_id, after_timestamp=timestamp + ) + + # Assert + assert latest == 0 + assert retrieved_logs == "" + + def test_create_run_with_and_without_federation_options(self) -> None: + """Test that the recording and fetching of federation options works.""" + # Prepare + state = self.state_factory() + # A run w/ federation options + fed_options = ConfigsRecord({"setting-a": 123, "setting-b": [4, 5, 6]}) + run_id = state.create_run( + None, + None, + "fffffff", + {"mock_key": "mock_value"}, + federation_options=fed_options, + ) + 
state.update_run_status(run_id, RunStatus(Status.STARTING, "", "")) + + # Execute + fed_options_fetched = state.get_federation_options(run_id=run_id) + + # Assert + assert fed_options_fetched == fed_options + + # Generate a run_id that doesn't exist. Then check None is returned + unique_int = next(num for num in range(0, 1) if num not in {run_id}) + assert state.get_federation_options(run_id=unique_int) is None + def create_task_ins( consumer_node_id: int, @@ -918,7 +1206,7 @@ def create_task_ins( producer=Node(node_id=0, anonymous=True), consumer=consumer, task_type="mock", - recordset=RecordSet(parameters={}, metrics={}, configs={}), + recordset=ProtoRecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, created_at=time.time(), ), @@ -943,7 +1231,7 @@ def create_task_res( consumer=Node(node_id=0, anonymous=True), ancestry=ancestry, task_type="mock", - recordset=RecordSet(parameters={}, metrics={}, configs={}), + recordset=ProtoRecordSet(parameters={}, metrics={}, configs={}), ttl=DEFAULT_TTL, created_at=time.time(), ), @@ -957,9 +1245,9 @@ class InMemoryStateTest(StateTest): __test__ = True - def state_factory(self) -> State: + def state_factory(self) -> LinkState: """Return InMemoryState.""" - return InMemoryState() + return InMemoryLinkState() class SqliteInMemoryStateTest(StateTest, unittest.TestCase): @@ -967,9 +1255,9 @@ class SqliteInMemoryStateTest(StateTest, unittest.TestCase): __test__ = True - def state_factory(self) -> SqliteState: + def state_factory(self) -> SqliteLinkState: """Return SqliteState with in-memory database.""" - state = SqliteState(":memory:") + state = SqliteLinkState(":memory:") state.initialize() return state @@ -982,7 +1270,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 13 + assert len(result) == 17 class SqliteFileBasedTest(StateTest, unittest.TestCase): @@ -990,11 +1278,11 @@ class SqliteFileBasedTest(StateTest, unittest.TestCase): 
__test__ = True - def state_factory(self) -> SqliteState: + def state_factory(self) -> SqliteLinkState: """Return SqliteState with file-based database.""" # pylint: disable-next=consider-using-with,attribute-defined-outside-init self.tmp_file = tempfile.NamedTemporaryFile() - state = SqliteState(database_path=self.tmp_file.name) + state = SqliteLinkState(database_path=self.tmp_file.name) state.initialize() return state @@ -1007,7 +1295,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 13 + assert len(result) == 17 if __name__ == "__main__": diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate.py similarity index 66% rename from src/py/flwr/server/superlink/state/sqlite_state.py rename to src/py/flwr/server/superlink/linkstate/sqlite_linkstate.py index 6d644c3b2232..54df4685bf9a 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""SQLite based implemenation of server state.""" +"""SQLite based implementation of the link state.""" # pylint: disable=too-many-lines @@ -25,26 +25,39 @@ from typing import Any, Optional, Union, cast from uuid import UUID, uuid4 -from flwr.common import log, now +from flwr.common import Context, log, now from flwr.common.constant import ( MESSAGE_TTL_TOLERANCE, NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES, + Status, ) -from flwr.common.typing import Run, UserConfig -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 +from flwr.common.record import ConfigsRecord +from flwr.common.typing import Run, RunStatus, UserConfig + +# pylint: disable=E0611 +from flwr.proto.node_pb2 import Node +from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes + +# pylint: enable=E0611 from flwr.server.utils.validator import validate_task_ins_or_res -from .state import State +from .linkstate import LinkState from .utils import ( + configsrecord_from_bytes, + configsrecord_to_bytes, + context_from_bytes, + context_to_bytes, convert_sint64_to_uint64, convert_sint64_values_in_dict_to_uint64, convert_uint64_to_sint64, convert_uint64_values_in_dict_to_sint64, generate_rand_int_from_bytes, - make_node_unavailable_taskres, + has_valid_sub_status, + is_valid_transition, + verify_found_taskres, + verify_taskins_ids, ) SQL_CREATE_TABLE_NODE = """ @@ -79,7 +92,33 @@ fab_id TEXT, fab_version TEXT, fab_hash TEXT, - override_config TEXT + override_config TEXT, + pending_at TEXT, + starting_at TEXT, + running_at TEXT, + finished_at TEXT, + sub_status TEXT, + details TEXT, + federation_options BLOB +); +""" + +SQL_CREATE_TABLE_LOGS = """ +CREATE TABLE IF NOT EXISTS logs ( + timestamp REAL, + run_id 
INTEGER, + node_id INTEGER, + log TEXT, + PRIMARY KEY (timestamp, run_id, node_id), + FOREIGN KEY (run_id) REFERENCES run(run_id) +); +""" + +SQL_CREATE_TABLE_CONTEXT = """ +CREATE TABLE IF NOT EXISTS context( + run_id INTEGER UNIQUE, + context BLOB, + FOREIGN KEY(run_id) REFERENCES run(run_id) ); """ @@ -126,14 +165,14 @@ DictOrTuple = Union[tuple[Any, ...], dict[str, Any]] -class SqliteState(State): # pylint: disable=R0904 - """SQLite-based state implementation.""" +class SqliteLinkState(LinkState): # pylint: disable=R0904 + """SQLite-based LinkState implementation.""" def __init__( self, database_path: str, ) -> None: - """Initialize an SqliteState. + """Initialize an SqliteLinkState. Parameters ---------- @@ -166,6 +205,8 @@ def initialize(self, log_queries: bool = False) -> list[tuple[str]]: # Create each table if not exists queries cur.execute(SQL_CREATE_TABLE_RUN) + cur.execute(SQL_CREATE_TABLE_LOGS) + cur.execute(SQL_CREATE_TABLE_CONTEXT) cur.execute(SQL_CREATE_TABLE_TASK_INS) cur.execute(SQL_CREATE_TABLE_TASK_RES) cur.execute(SQL_CREATE_TABLE_NODE) @@ -173,7 +214,6 @@ def initialize(self, log_queries: bool = False) -> list[tuple[str]]: cur.execute(SQL_CREATE_TABLE_PUBLIC_KEY) cur.execute(SQL_CREATE_INDEX_ONLINE_UNTIL) res = cur.execute("SELECT name FROM sqlite_schema;") - return res.fetchall() def query( @@ -183,7 +223,7 @@ def query( ) -> list[dict[str, Any]]: """Execute a SQL query.""" if self.conn is None: - raise AttributeError("State is not initialized.") + raise AttributeError("LinkState is not initialized.") if data is None: data = [] @@ -214,11 +254,11 @@ def query( def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: """Store one TaskIns. - Usually, the Driver API calls this to schedule instructions. + Usually, the ServerAppIo API calls this to schedule instructions. - Stores the value of the task_ins in the state and, if successful, returns the - task_id (UUID) of the task_ins. 
If, for any reason, storing the task_ins fails, - `None` is returned. + Stores the value of the task_ins in the link state and, if successful, + returns the task_id (UUID) of the task_ins. If, for any reason, storing + the task_ins fails, `None` is returned. Constraints ----------- @@ -233,7 +273,6 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: if any(errors): log(ERROR, errors) return None - # Create task_id task_id = uuid4() @@ -246,16 +285,36 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: data[0], ["run_id", "producer_node_id", "consumer_node_id"] ) + # Validate run_id + query = "SELECT run_id FROM run WHERE run_id = ?;" + if not self.query(query, (data[0]["run_id"],)): + log(ERROR, "Invalid run ID for TaskIns: %s", task_ins.run_id) + return None + # Validate source node ID + if task_ins.task.producer.node_id != 0: + log( + ERROR, + "Invalid source node ID for TaskIns: %s", + task_ins.task.producer.node_id, + ) + return None + # Validate destination node ID + query = "SELECT node_id FROM node WHERE node_id = ?;" + if not task_ins.task.consumer.anonymous: + if not self.query(query, (data[0]["consumer_node_id"],)): + log( + ERROR, + "Invalid destination node ID for TaskIns: %s", + task_ins.task.consumer.node_id, + ) + return None + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" # Only invalid run_id can trigger IntegrityError. # This may need to be changed in the future version with more integrity checks. - try: - self.query(query, data) - except sqlite3.IntegrityError: - log(ERROR, "`run` is invalid") - return None + self.query(query, data) return task_id @@ -450,150 +509,67 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # pylint: disable-next=R0912,R0915,R0914 def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: - """Get TaskRes for task_ids. 
- - Usually, the Driver API calls this method to get results for instructions it has - previously scheduled. + """Get TaskRes for the given TaskIns IDs.""" + ret: dict[UUID, TaskRes] = {} - Retrieves all TaskRes for the given `task_ids` and returns and empty list if - none could be found. - - Constraints - ----------- - If `limit` is not `None`, return, at most, `limit` number of TaskRes. The limit - will only take effect if enough task_ids are in the set AND are currently - available. If `limit` is set, it has to be greater than zero. - """ - # Check if corresponding TaskIns exists and is not expired - task_ids_placeholders = ",".join([f":id_{i}" for i in range(len(task_ids))]) + # Verify TaskIns IDs + current = time.time() query = f""" SELECT * FROM task_ins - WHERE task_id IN ({task_ids_placeholders}) - AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) + WHERE task_id IN ({",".join(["?"] * len(task_ids))}); """ - query += ";" - - task_ins_data = {} - for index, task_id in enumerate(task_ids): - task_ins_data[f"id_{index}"] = str(task_id) - - task_ins_rows = self.query(query, task_ins_data) - - if not task_ins_rows: - return [] - - for row in task_ins_rows: - # Convert values from sint64 to uint64 + rows = self.query(query, tuple(str(task_id) for task_id in task_ids)) + found_task_ins_dict: dict[UUID, TaskIns] = {} + for row in rows: convert_sint64_values_in_dict_to_uint64( row, ["run_id", "producer_node_id", "consumer_node_id"] ) - task_ins = dict_to_task_ins(row) - if task_ins.task.created_at + task_ins.task.ttl <= time.time(): - log(WARNING, "TaskIns with task_id %s is expired.", task_ins.task_id) - task_ids.remove(UUID(task_ins.task_id)) + found_task_ins_dict[UUID(row["task_id"])] = dict_to_task_ins(row) - # Retrieve all anonymous Tasks - if len(task_ids) == 0: - return [] + ret = verify_taskins_ids( + inquired_taskins_ids=task_ids, + found_taskins_dict=found_task_ins_dict, + current_time=current, + ) - placeholders = ",".join([f":id_{i}" for i in 
range(len(task_ids))]) + # Find all TaskRes query = f""" SELECT * FROM task_res - WHERE ancestry IN ({placeholders}) - AND delivered_at = "" + WHERE ancestry IN ({",".join(["?"] * len(task_ids))}) + AND delivered_at = ""; """ + rows = self.query(query, tuple(str(task_id) for task_id in task_ids)) + for row in rows: + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + tmp_ret_dict = verify_found_taskres( + inquired_taskins_ids=task_ids, + found_taskins_dict=found_task_ins_dict, + found_taskres_list=[dict_to_task_res(row) for row in rows], + current_time=current, + ) + ret.update(tmp_ret_dict) - data: dict[str, Union[str, float, int]] = {} - - query += ";" - - for index, task_id in enumerate(task_ids): - data[f"id_{index}"] = str(task_id) - - rows = self.query(query, data) - - if rows: - # Prepare query - found_task_ids = [row["task_id"] for row in rows] - placeholders = ",".join([f":id_{i}" for i in range(len(found_task_ids))]) - query = f""" - UPDATE task_res - SET delivered_at = :delivered_at - WHERE task_id IN ({placeholders}) - RETURNING *; - """ - - # Prepare data for query - delivered_at = now().isoformat() - data = {"delivered_at": delivered_at} - for index, task_id in enumerate(found_task_ids): - data[f"id_{index}"] = str(task_id) - - # Run query - rows = self.query(query, data) - - for row in rows: - # Convert values from sint64 to uint64 - convert_sint64_values_in_dict_to_uint64( - row, ["run_id", "producer_node_id", "consumer_node_id"] - ) - - result = [dict_to_task_res(row) for row in rows] - - # 1. 
Query: Fetch consumer_node_id of remaining task_ids - # Assume the ancestry field only contains one element - data.clear() - replied_task_ids: set[UUID] = {UUID(str(row["ancestry"])) for row in rows} - remaining_task_ids = task_ids - replied_task_ids - placeholders = ",".join([f":id_{i}" for i in range(len(remaining_task_ids))]) - query = f""" - SELECT consumer_node_id - FROM task_ins - WHERE task_id IN ({placeholders}); - """ - for index, task_id in enumerate(remaining_task_ids): - data[f"id_{index}"] = str(task_id) - node_ids = [int(row["consumer_node_id"]) for row in self.query(query, data)] - - # 2. Query: Select offline nodes - placeholders = ",".join([f":id_{i}" for i in range(len(node_ids))]) + # Mark existing TaskRes to be returned as delivered + delivered_at = now().isoformat() + for task_res in ret.values(): + task_res.task.delivered_at = delivered_at + task_res_ids = [task_res.task_id for task_res in ret.values()] query = f""" - SELECT node_id - FROM node - WHERE node_id IN ({placeholders}) - AND online_until < :time; + UPDATE task_res + SET delivered_at = ? + WHERE task_id IN ({",".join(["?"] * len(task_res_ids))}); """ - data = {f"id_{i}": str(node_id) for i, node_id in enumerate(node_ids)} - data["time"] = time.time() - offline_node_ids = [int(row["node_id"]) for row in self.query(query, data)] + data: list[Any] = [delivered_at] + task_res_ids + self.query(query, data) - # 3. 
Query: Select TaskIns for offline nodes - placeholders = ",".join([f":id_{i}" for i in range(len(offline_node_ids))]) - query = f""" - SELECT * - FROM task_ins - WHERE consumer_node_id IN ({placeholders}); - """ - data = {f"id_{i}": str(node_id) for i, node_id in enumerate(offline_node_ids)} - task_ins_rows = self.query(query, data) - - # Make TaskRes containing node unavailabe error - for row in task_ins_rows: - for row in rows: - # Convert values from sint64 to uint64 - convert_sint64_values_in_dict_to_uint64( - row, ["run_id", "producer_node_id", "consumer_node_id"] - ) + # Cleanup + self._force_delete_tasks_by_ids(set(ret.keys())) - task_ins = dict_to_task_ins(row) - err_taskres = make_node_unavailable_taskres( - ref_taskins=task_ins, - ) - result.append(err_taskres) - - return result + return list(ret.values()) def num_task_ins(self) -> int: """Calculate the number of task_ins in store. @@ -645,7 +621,7 @@ def delete_tasks(self, task_ids: set[UUID]) -> None: """ if self.conn is None: - raise AttributeError("State not intitialized") + raise AttributeError("LinkState not intitialized") with self.conn: self.conn.execute(query_1, data) @@ -653,10 +629,36 @@ def delete_tasks(self, task_ids: set[UUID]) -> None: return None + def _force_delete_tasks_by_ids(self, task_ids: set[UUID]) -> None: + """Delete tasks based on a set of TaskIns IDs.""" + if not task_ids: + return + if self.conn is None: + raise AttributeError("LinkState not initialized") + + placeholders = ",".join([f":id_{index}" for index in range(len(task_ids))]) + data = {f"id_{index}": str(task_id) for index, task_id in enumerate(task_ids)} + + # Delete task_ins + query_1 = f""" + DELETE FROM task_ins + WHERE task_id IN ({placeholders}); + """ + + # Delete task_res + query_2 = f""" + DELETE FROM task_res + WHERE ancestry IN ({placeholders}); + """ + + with self.conn: + self.conn.execute(query_1, data) + self.conn.execute(query_2, data) + def create_node( self, ping_interval: float, public_key: 
Optional[bytes] = None ) -> int: - """Create, store in state, and return `node_id`.""" + """Create, store in the link state, and return `node_id`.""" # Sample a random uint64 as node_id uint64_node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) @@ -706,7 +708,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: params += (public_key,) # type: ignore if self.conn is None: - raise AttributeError("State is not initialized.") + raise AttributeError("LinkState is not initialized.") try: with self.conn: @@ -753,12 +755,14 @@ def get_node_id(self, node_public_key: bytes) -> Optional[int]: return uint64_node_id return None + # pylint: disable=too-many-arguments,too-many-positional-arguments def create_run( self, fab_id: Optional[str], fab_version: Optional[str], fab_hash: Optional[str], override_config: UserConfig, + federation_options: ConfigsRecord, ) -> int: """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id @@ -773,26 +777,30 @@ def create_run( if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: query = ( "INSERT INTO run " - "(run_id, fab_id, fab_version, fab_hash, override_config)" - "VALUES (?, ?, ?, ?, ?);" + "(run_id, fab_id, fab_version, fab_hash, override_config, " + "federation_options, pending_at, starting_at, running_at, finished_at, " + "sub_status, details) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);" ) if fab_hash: - self.query( - query, - (sint64_run_id, "", "", fab_hash, json.dumps(override_config)), - ) - else: - self.query( - query, - ( - sint64_run_id, - fab_id, - fab_version, - "", - json.dumps(override_config), - ), - ) - # Note: we need to return the uint64 value of the run_id + fab_id, fab_version = "", "" + override_config_json = json.dumps(override_config) + data = [ + sint64_run_id, + fab_id, + fab_version, + fab_hash, + override_config_json, + configsrecord_to_bytes(federation_options), + ] + data += [ + now().isoformat(), + "", + "", + "", + 
"", + "", + ] + self.query(query, tuple(data)) return uint64_run_id log(ERROR, "Unexpected run creation failure.") return 0 @@ -800,7 +808,7 @@ def create_run( def store_server_private_public_key( self, private_key: bytes, public_key: bytes ) -> None: - """Store `server_private_key` and `server_public_key` in state.""" + """Store `server_private_key` and `server_public_key` in the link state.""" query = "SELECT COUNT(*) FROM credential" count = self.query(query)[0]["COUNT(*)"] if count < 1: @@ -833,13 +841,13 @@ def get_server_public_key(self) -> Optional[bytes]: return public_key def store_node_public_keys(self, public_keys: set[bytes]) -> None: - """Store a set of `node_public_keys` in state.""" + """Store a set of `node_public_keys` in the link state.""" query = "INSERT INTO public_key (public_key) VALUES (?)" data = [(key,) for key in public_keys] self.query(query, data) def store_node_public_key(self, public_key: bytes) -> None: - """Store a `node_public_key` in state.""" + """Store a `node_public_key` in the link state.""" query = "INSERT INTO public_key (public_key) VALUES (:public_key)" self.query(query, {"public_key": public_key}) @@ -850,6 +858,12 @@ def get_node_public_keys(self) -> set[bytes]: result: set[bytes] = {row["public_key"] for row in rows} return result + def get_run_ids(self) -> set[int]: + """Retrieve all run IDs.""" + query = "SELECT run_id FROM run;" + rows = self.query(query) + return {convert_sint64_to_uint64(row["run_id"]) for row in rows} + def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" # Convert the uint64 value to sint64 for SQLite @@ -864,10 +878,122 @@ def get_run(self, run_id: int) -> Optional[Run]: fab_version=row["fab_version"], fab_hash=row["fab_hash"], override_config=json.loads(row["override_config"]), + pending_at=row["pending_at"], + starting_at=row["starting_at"], + running_at=row["running_at"], + finished_at=row["finished_at"], + status=RunStatus( + 
status=determine_run_status(row), + sub_status=row["sub_status"], + details=row["details"], + ), ) log(ERROR, "`run_id` does not exist.") return None + def get_run_status(self, run_ids: set[int]) -> dict[int, RunStatus]: + """Retrieve the statuses for the specified runs.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_ids = (convert_uint64_to_sint64(run_id) for run_id in set(run_ids)) + query = f"SELECT * FROM run WHERE run_id IN ({','.join(['?'] * len(run_ids))});" + rows = self.query(query, tuple(sint64_run_ids)) + + return { + # Restore uint64 run IDs + convert_sint64_to_uint64(row["run_id"]): RunStatus( + status=determine_run_status(row), + sub_status=row["sub_status"], + details=row["details"], + ) + for row in rows + } + + def update_run_status(self, run_id: int, new_status: RunStatus) -> bool: + """Update the status of the run with the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + query = "SELECT * FROM run WHERE run_id = ?;" + rows = self.query(query, (sint64_run_id,)) + + # Check if the run_id exists + if not rows: + log(ERROR, "`run_id` is invalid") + return False + + # Check if the status transition is valid + row = rows[0] + current_status = RunStatus( + status=determine_run_status(row), + sub_status=row["sub_status"], + details=row["details"], + ) + if not is_valid_transition(current_status, new_status): + log( + ERROR, + 'Invalid status transition: from "%s" to "%s"', + current_status.status, + new_status.status, + ) + return False + + # Check if the sub-status is valid + if not has_valid_sub_status(current_status): + log( + ERROR, + 'Invalid sub-status "%s" for status "%s"', + current_status.sub_status, + current_status.status, + ) + return False + + # Update the status + query = "UPDATE run SET %s= ?, sub_status = ?, details = ? 
" + query += "WHERE run_id = ?;" + + timestamp_fld = "" + if new_status.status == Status.STARTING: + timestamp_fld = "starting_at" + elif new_status.status == Status.RUNNING: + timestamp_fld = "running_at" + elif new_status.status == Status.FINISHED: + timestamp_fld = "finished_at" + + data = ( + now().isoformat(), + new_status.sub_status, + new_status.details, + sint64_run_id, + ) + self.query(query % timestamp_fld, data) + return True + + def get_pending_run_id(self) -> Optional[int]: + """Get the `run_id` of a run with `Status.PENDING` status, if any.""" + pending_run_id = None + + # Fetch all runs with unset `starting_at` (i.e. they are in PENDING status) + query = "SELECT * FROM run WHERE starting_at = '' LIMIT 1;" + rows = self.query(query) + if rows: + pending_run_id = convert_sint64_to_uint64(rows[0]["run_id"]) + + return pending_run_id + + def get_federation_options(self, run_id: int) -> Optional[ConfigsRecord]: + """Retrieve the federation options for the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + query = "SELECT federation_options FROM run WHERE run_id = ?;" + rows = self.query(query, (sint64_run_id,)) + + # Check if the run_id exists + if not rows: + log(ERROR, "`run_id` is invalid") + return None + + row = rows[0] + return configsrecord_from_bytes(row["federation_options"]) + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: """Acknowledge a ping received from a node, serving as a heartbeat.""" sint64_node_id = convert_uint64_to_sint64(node_id) @@ -883,6 +1009,72 @@ def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: log(ERROR, "`node_id` does not exist.") return False + def get_serverapp_context(self, run_id: int) -> Optional[Context]: + """Get the context for the specified `run_id`.""" + # Retrieve context if any + query = "SELECT context FROM context WHERE run_id = ?;" + rows = self.query(query, 
(convert_uint64_to_sint64(run_id),)) + context = context_from_bytes(rows[0]["context"]) if rows else None + return context + + def set_serverapp_context(self, run_id: int, context: Context) -> None: + """Set the context for the specified `run_id`.""" + # Convert context to bytes + context_bytes = context_to_bytes(context) + sint_run_id = convert_uint64_to_sint64(run_id) + + # Check if any existing Context assigned to the run_id + query = "SELECT COUNT(*) FROM context WHERE run_id = ?;" + if self.query(query, (sint_run_id,))[0]["COUNT(*)"] > 0: + # Update context + query = "UPDATE context SET context = ? WHERE run_id = ?;" + self.query(query, (context_bytes, sint_run_id)) + else: + try: + # Store context + query = "INSERT INTO context (run_id, context) VALUES (?, ?);" + self.query(query, (sint_run_id, context_bytes)) + except sqlite3.IntegrityError: + raise ValueError(f"Run {run_id} not found") from None + + def add_serverapp_log(self, run_id: int, log_message: str) -> None: + """Add a log entry to the ServerApp logs for the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + + # Store log + try: + query = """ + INSERT INTO logs (timestamp, run_id, node_id, log) VALUES (?, ?, ?, ?); + """ + self.query(query, (now().timestamp(), sint64_run_id, 0, log_message)) + except sqlite3.IntegrityError: + raise ValueError(f"Run {run_id} not found") from None + + def get_serverapp_log( + self, run_id: int, after_timestamp: Optional[float] + ) -> tuple[str, float]: + """Get the ServerApp logs for the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + + # Check if the run_id exists + query = "SELECT run_id FROM run WHERE run_id = ?;" + if not self.query(query, (sint64_run_id,)): + raise ValueError(f"Run {run_id} not found") + + # Retrieve logs + if after_timestamp is None: + after_timestamp = 0.0 + query = """ + SELECT log, timestamp 
FROM logs + WHERE run_id = ? AND node_id = ? AND timestamp > ?; + """ + rows = self.query(query, (sint64_run_id, 0, after_timestamp)) + rows.sort(key=lambda x: x["timestamp"]) + latest_timestamp = rows[-1]["timestamp"] if rows else 0.0 + return "".join(row["log"] for row in rows), latest_timestamp + def get_valid_task_ins(self, task_id: str) -> Optional[dict[str, Any]]: """Check if the TaskIns exists and is valid (not expired). @@ -967,7 +1159,7 @@ def task_res_to_dict(task_msg: TaskRes) -> dict[str, Any]: def dict_to_task_ins(task_dict: dict[str, Any]) -> TaskIns: """Turn task_dict into protobuf message.""" - recordset = RecordSet() + recordset = ProtoRecordSet() recordset.ParseFromString(task_dict["recordset"]) result = TaskIns( @@ -997,7 +1189,7 @@ def dict_to_task_ins(task_dict: dict[str, Any]) -> TaskIns: def dict_to_task_res(task_dict: dict[str, Any]) -> TaskRes: """Turn task_dict into protobuf message.""" - recordset = RecordSet() + recordset = ProtoRecordSet() recordset.ParseFromString(task_dict["recordset"]) result = TaskRes( @@ -1023,3 +1215,17 @@ def dict_to_task_res(task_dict: dict[str, Any]) -> TaskRes: ), ) return result + + +def determine_run_status(row: dict[str, Any]) -> str: + """Determine the status of the run based on timestamp fields.""" + if row["pending_at"]: + if row["finished_at"]: + return Status.FINISHED + if row["starting_at"]: + if row["running_at"]: + return Status.RUNNING + return Status.STARTING + return Status.PENDING + run_id = convert_sint64_to_uint64(row["run_id"]) + raise sqlite3.IntegrityError(f"The run {run_id} does not have a valid status.") diff --git a/src/py/flwr/server/superlink/state/sqlite_state_test.py b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate_test.py similarity index 91% rename from src/py/flwr/server/superlink/state/sqlite_state_test.py rename to src/py/flwr/server/superlink/linkstate/sqlite_linkstate_test.py index 10e12da96bd5..ed2960ef76fa 100644 --- 
a/src/py/flwr/server/superlink/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate_test.py @@ -17,8 +17,8 @@ import unittest -from flwr.server.superlink.state.sqlite_state import task_ins_to_dict -from flwr.server.superlink.state.state_test import create_task_ins +from flwr.server.superlink.linkstate.linkstate_test import create_task_ins +from flwr.server.superlink.linkstate.sqlite_linkstate import task_ins_to_dict class SqliteStateTest(unittest.TestCase): diff --git a/src/py/flwr/server/superlink/linkstate/utils.py b/src/py/flwr/server/superlink/linkstate/utils.py new file mode 100644 index 000000000000..2b73221eae3c --- /dev/null +++ b/src/py/flwr/server/superlink/linkstate/utils.py @@ -0,0 +1,389 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility functions for State.""" + + +from logging import ERROR +from os import urandom +from typing import Optional, Union +from uuid import UUID, uuid4 + +from flwr.common import ConfigsRecord, Context, log, now, serde +from flwr.common.constant import ErrorCode, Status, SubStatus +from flwr.common.typing import RunStatus + +# pylint: disable=E0611 +from flwr.proto.error_pb2 import Error +from flwr.proto.message_pb2 import Context as ProtoContext +from flwr.proto.node_pb2 import Node +from flwr.proto.recordset_pb2 import ConfigsRecord as ProtoConfigsRecord +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes + +# pylint: enable=E0611 + +NODE_UNAVAILABLE_ERROR_REASON = ( + "Error: Node Unavailable - The destination node is currently unavailable. " + "It exceeds the time limit specified in its last ping." +) + +VALID_RUN_STATUS_TRANSITIONS = { + (Status.PENDING, Status.STARTING), + (Status.STARTING, Status.RUNNING), + (Status.RUNNING, Status.FINISHED), + # Any non-FINISHED status can transition to FINISHED + (Status.PENDING, Status.FINISHED), + (Status.STARTING, Status.FINISHED), +} +VALID_RUN_SUB_STATUSES = { + SubStatus.COMPLETED, + SubStatus.FAILED, + SubStatus.STOPPED, +} +MESSAGE_UNAVAILABLE_ERROR_REASON = ( + "Error: Message Unavailable - The requested message could not be found in the " + "database. It may have expired due to its TTL or never existed." +) +REPLY_MESSAGE_UNAVAILABLE_ERROR_REASON = ( + "Error: Reply Message Unavailable - The reply message has expired." +) + + +def generate_rand_int_from_bytes(num_bytes: int) -> int: + """Generate a random unsigned integer from `num_bytes` bytes.""" + return int.from_bytes(urandom(num_bytes), "little", signed=False) + + +def convert_uint64_to_sint64(u: int) -> int: + """Convert a uint64 value to a sint64 value with the same bit sequence. + + Parameters + ---------- + u : int + The unsigned 64-bit integer to convert. 
+ + Returns + ------- + int + The signed 64-bit integer equivalent. + + The signed 64-bit integer will have the same bit pattern as the + unsigned 64-bit integer but may have a different decimal value. + + For numbers within the range [0, `sint64` max value], the decimal + value remains the same. However, for numbers greater than the `sint64` + max value, the decimal value will differ due to the wraparound caused + by the sign bit. + """ + if u >= (1 << 63): + return u - (1 << 64) + return u + + +def convert_sint64_to_uint64(s: int) -> int: + """Convert a sint64 value to a uint64 value with the same bit sequence. + + Parameters + ---------- + s : int + The signed 64-bit integer to convert. + + Returns + ------- + int + The unsigned 64-bit integer equivalent. + + The unsigned 64-bit integer will have the same bit pattern as the + signed 64-bit integer but may have a different decimal value. + + For negative `sint64` values, the conversion adds 2^64 to the + signed value to obtain the equivalent `uint64` value. For non-negative + `sint64` values, the decimal value remains unchanged in the `uint64` + representation. + """ + if s < 0: + return s + (1 << 64) + return s + + +def convert_uint64_values_in_dict_to_sint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert uint64 values to sint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_uint64_to_sint64(data_dict[key]) + + +def convert_sint64_values_in_dict_to_uint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert sint64 values to uint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. 
+ keys : list[str] + A list of keys in the dictionary whose values need to be converted. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_sint64_to_uint64(data_dict[key]) + + +def context_to_bytes(context: Context) -> bytes: + """Serialize `Context` to bytes.""" + return serde.context_to_proto(context).SerializeToString() + + +def context_from_bytes(context_bytes: bytes) -> Context: + """Deserialize `Context` from bytes.""" + return serde.context_from_proto(ProtoContext.FromString(context_bytes)) + + +def configsrecord_to_bytes(configs_record: ConfigsRecord) -> bytes: + """Serialize a `ConfigsRecord` to bytes.""" + return serde.configs_record_to_proto(configs_record).SerializeToString() + + +def configsrecord_from_bytes(configsrecord_bytes: bytes) -> ConfigsRecord: + """Deserialize `ConfigsRecord` from bytes.""" + return serde.configs_record_from_proto( + ProtoConfigsRecord.FromString(configsrecord_bytes) + ) + + +def is_valid_transition(current_status: RunStatus, new_status: RunStatus) -> bool: + """Check if a transition between two run statuses is valid. + + Parameters + ---------- + current_status : RunStatus + The current status of the run. + new_status : RunStatus + The new status to transition to. + + Returns + ------- + bool + True if the transition is valid, False otherwise. + """ + # Transition to FINISHED from a non-RUNNING status is only allowed + # if the sub-status is not COMPLETED + if ( + current_status.status in [Status.PENDING, Status.STARTING] + and new_status.status == Status.FINISHED + ): + return new_status.sub_status != SubStatus.COMPLETED + + return ( + current_status.status, + new_status.status, + ) in VALID_RUN_STATUS_TRANSITIONS + + +def has_valid_sub_status(status: RunStatus) -> bool: + """Check if the 'sub_status' field of the given status is valid. + + Parameters + ---------- + status : RunStatus + The status object to be checked. 
+ + Returns + ------- + bool + True if the status object has a valid sub-status, False otherwise. + + Notes + ----- + Only an empty string (i.e., "") is considered a valid sub-status for + non-finished statuses. The sub-status of a finished status cannot be empty. + """ + if status.status == Status.FINISHED: + return status.sub_status in VALID_RUN_SUB_STATUSES + return status.sub_status == "" + + +def create_taskres_for_unavailable_taskins(taskins_id: Union[str, UUID]) -> TaskRes: + """Generate a TaskRes with a TaskIns unavailable error. + + Parameters + ---------- + taskins_id : Union[str, UUID] + The ID of the unavailable TaskIns. + + Returns + ------- + TaskRes + A TaskRes with an error code MESSAGE_UNAVAILABLE to indicate that the + inquired TaskIns ID cannot be found (due to non-existence or expiration). + """ + current_time = now().timestamp() + return TaskRes( + task_id=str(uuid4()), + group_id="", # Unknown group ID + run_id=0, # Unknown run ID + task=Task( + # This function is only called by SuperLink, and thus it's the producer. + producer=Node(node_id=0, anonymous=False), + consumer=Node(node_id=0, anonymous=False), + created_at=current_time, + ttl=0, + ancestry=[str(taskins_id)], + task_type="", # Unknown message type + error=Error( + code=ErrorCode.MESSAGE_UNAVAILABLE, + reason=MESSAGE_UNAVAILABLE_ERROR_REASON, + ), + ), + ) + + +def create_taskres_for_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: + """Generate a TaskRes with a reply message unavailable error from a TaskIns. + + Parameters + ---------- + ref_taskins : TaskIns + The reference TaskIns object. + + Returns + ------- + TaskRes + The generated TaskRes with an error code REPLY_MESSAGE_UNAVAILABLE_ERROR_REASON, + indicating that the original TaskRes has expired. 
+ """ + current_time = now().timestamp() + ttl = ref_taskins.task.ttl - (current_time - ref_taskins.task.created_at) + if ttl < 0: + log(ERROR, "Creating TaskRes for TaskIns that exceeds its TTL.") + ttl = 0 + return TaskRes( + task_id=str(uuid4()), + group_id=ref_taskins.group_id, + run_id=ref_taskins.run_id, + task=Task( + # This function is only called by SuperLink, and thus it's the producer. + producer=Node(node_id=0, anonymous=False), + consumer=Node(node_id=0, anonymous=False), + created_at=current_time, + ttl=ttl, + ancestry=[ref_taskins.task_id], + task_type=ref_taskins.task.task_type, + error=Error( + code=ErrorCode.REPLY_MESSAGE_UNAVAILABLE, + reason=REPLY_MESSAGE_UNAVAILABLE_ERROR_REASON, + ), + ), + ) + + +def has_expired(task_ins_or_res: Union[TaskIns, TaskRes], current_time: float) -> bool: + """Check if the TaskIns/TaskRes has expired.""" + return task_ins_or_res.task.ttl + task_ins_or_res.task.created_at < current_time + + +def verify_taskins_ids( + inquired_taskins_ids: set[UUID], + found_taskins_dict: dict[UUID, TaskIns], + current_time: Optional[float] = None, + update_set: bool = True, +) -> dict[UUID, TaskRes]: + """Verify found TaskIns and generate error TaskRes for invalid ones. + + Parameters + ---------- + inquired_taskins_ids : set[UUID] + Set of TaskIns IDs for which to generate error TaskRes if invalid. + found_taskins_dict : dict[UUID, TaskIns] + Dictionary containing all found TaskIns indexed by their IDs. + current_time : Optional[float] (default: None) + The current time to check for expiration. If set to `None`, the current time + will automatically be set to the current timestamp using `now().timestamp()`. + update_set : bool (default: True) + If True, the `inquired_taskins_ids` will be updated to remove invalid ones, + by default True. + + Returns + ------- + dict[UUID, TaskRes] + A dictionary of error TaskRes indexed by the corresponding TaskIns ID. 
+ """ + ret_dict = {} + current = current_time if current_time else now().timestamp() + for taskins_id in list(inquired_taskins_ids): + # Generate error TaskRes if the task_ins doesn't exist or has expired + taskins = found_taskins_dict.get(taskins_id) + if taskins is None or has_expired(taskins, current): + if update_set: + inquired_taskins_ids.remove(taskins_id) + taskres = create_taskres_for_unavailable_taskins(taskins_id) + ret_dict[taskins_id] = taskres + return ret_dict + + +def verify_found_taskres( + inquired_taskins_ids: set[UUID], + found_taskins_dict: dict[UUID, TaskIns], + found_taskres_list: list[TaskRes], + current_time: Optional[float] = None, + update_set: bool = True, +) -> dict[UUID, TaskRes]: + """Verify found TaskRes and generate error TaskRes for invalid ones. + + Parameters + ---------- + inquired_taskins_ids : set[UUID] + Set of TaskIns IDs for which to generate error TaskRes if invalid. + found_taskins_dict : dict[UUID, TaskIns] + Dictionary containing all found TaskIns indexed by their IDs. + found_taskres_list : dict[TaskIns, TaskRes] + List of found TaskRes to be verified. + current_time : Optional[float] (default: None) + The current time to check for expiration. If set to `None`, the current time + will automatically be set to the current timestamp using `now().timestamp()`. + update_set : bool (default: True) + If True, the `inquired_taskins_ids` will be updated to remove ones + that have a TaskRes, by default True. + + Returns + ------- + dict[UUID, TaskRes] + A dictionary of TaskRes indexed by the corresponding TaskIns ID. 
+ """ + ret_dict: dict[UUID, TaskRes] = {} + current = current_time if current_time else now().timestamp() + for taskres in found_taskres_list: + taskins_id = UUID(taskres.task.ancestry[0]) + if update_set: + inquired_taskins_ids.remove(taskins_id) + # Check if the TaskRes has expired + if has_expired(taskres, current): + # No need to insert the error TaskRes + taskres = create_taskres_for_unavailable_taskres( + found_taskins_dict[taskins_id] + ) + taskres.task.delivered_at = now().isoformat() + ret_dict[taskins_id] = taskres + return ret_dict diff --git a/src/py/flwr/server/superlink/state/utils_test.py b/src/py/flwr/server/superlink/linkstate/utils_test.py similarity index 100% rename from src/py/flwr/server/superlink/state/utils_test.py rename to src/py/flwr/server/superlink/linkstate/utils_test.py diff --git a/src/docker/superexec/Dockerfile b/src/py/flwr/server/superlink/simulation/__init__.py similarity index 85% rename from src/docker/superexec/Dockerfile rename to src/py/flwr/server/superlink/simulation/__init__.py index 9e4cc722921e..8485a3c9a3c7 100644 --- a/src/docker/superexec/Dockerfile +++ b/src/py/flwr/server/superlink/simulation/__init__.py @@ -12,9 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== - -ARG BASE_REPOSITORY=flwr/base -ARG BASE_IMAGE -FROM $BASE_REPOSITORY:$BASE_IMAGE - -ENTRYPOINT ["flower-superexec"] +"""Flower SimulationIo service.""" diff --git a/src/py/flwr/server/superlink/simulation/simulationio_grpc.py b/src/py/flwr/server/superlink/simulation/simulationio_grpc.py new file mode 100644 index 000000000000..d1e79306e0b7 --- /dev/null +++ b/src/py/flwr/server/superlink/simulation/simulationio_grpc.py @@ -0,0 +1,65 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SimulationIo gRPC API.""" + + +from logging import INFO +from typing import Optional + +import grpc + +from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.logger import log +from flwr.proto.simulationio_pb2_grpc import ( # pylint: disable=E0611 + add_SimulationIoServicer_to_server, +) +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkStateFactory + +from ..fleet.grpc_bidi.grpc_server import generic_create_grpc_server +from .simulationio_servicer import SimulationIoServicer + + +def run_simulationio_api_grpc( + address: str, + state_factory: LinkStateFactory, + ffs_factory: FfsFactory, + certificates: Optional[tuple[bytes, bytes, bytes]], +) -> grpc.Server: + """Run SimulationIo API (gRPC, request-response).""" + # Create SimulationIo API gRPC server + simulationio_servicer: grpc.Server = SimulationIoServicer( + state_factory=state_factory, + ffs_factory=ffs_factory, + ) + simulationio_add_servicer_to_server_fn = add_SimulationIoServicer_to_server + simulationio_grpc_server = generic_create_grpc_server( + servicer_and_add_fn=( + simulationio_servicer, + simulationio_add_servicer_to_server_fn, + ), + server_address=address, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + certificates=certificates, + ) + + log( + INFO, + "Flower Simulation Engine: Starting 
SimulationIo API on %s", + address, + ) + simulationio_grpc_server.start() + + return simulationio_grpc_server diff --git a/src/py/flwr/server/superlink/simulation/simulationio_servicer.py b/src/py/flwr/server/superlink/simulation/simulationio_servicer.py new file mode 100644 index 000000000000..f1bbf3be1fab --- /dev/null +++ b/src/py/flwr/server/superlink/simulation/simulationio_servicer.py @@ -0,0 +1,153 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""SimulationIo API servicer.""" + +import threading +from logging import DEBUG, INFO + +import grpc +from grpc import ServicerContext + +from flwr.common.constant import Status +from flwr.common.logger import log +from flwr.common.serde import ( + configs_record_to_proto, + context_from_proto, + context_to_proto, + fab_to_proto, + run_status_from_proto, + run_to_proto, +) +from flwr.common.typing import Fab, RunStatus +from flwr.proto import simulationio_pb2_grpc +from flwr.proto.log_pb2 import ( # pylint: disable=E0611 + PushLogsRequest, + PushLogsResponse, +) +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + GetFederationOptionsRequest, + GetFederationOptionsResponse, + UpdateRunStatusRequest, + UpdateRunStatusResponse, +) +from flwr.proto.simulationio_pb2 import ( # pylint: disable=E0611 + PullSimulationInputsRequest, + PullSimulationInputsResponse, + PushSimulationOutputsRequest, + PushSimulationOutputsResponse, +) +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkStateFactory + + +class SimulationIoServicer(simulationio_pb2_grpc.SimulationIoServicer): + """SimulationIo API servicer.""" + + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: + self.state_factory = state_factory + self.ffs_factory = ffs_factory + self.lock = threading.RLock() + + def PullSimulationInputs( + self, request: PullSimulationInputsRequest, context: ServicerContext + ) -> PullSimulationInputsResponse: + """Pull SimultionIo process inputs.""" + log(DEBUG, "SimultionIoServicer.SimultionIoInputs") + # Init access to LinkState and Ffs + state = self.state_factory.state() + ffs = self.ffs_factory.ffs() + + # Lock access to LinkState, preventing obtaining the same pending run_id + with self.lock: + # Attempt getting the run_id of a pending run + run_id = state.get_pending_run_id() + # If there's no 
pending run, return an empty response + if run_id is None: + return PullSimulationInputsResponse() + + # Retrieve Context, Run and Fab for the run_id + serverapp_ctxt = state.get_serverapp_context(run_id) + run = state.get_run(run_id) + fab = None + if run and run.fab_hash: + if result := ffs.get(run.fab_hash): + fab = Fab(run.fab_hash, result[0]) + if run and fab and serverapp_ctxt: + # Update run status to STARTING + if state.update_run_status(run_id, RunStatus(Status.STARTING, "", "")): + log(INFO, "Starting run %d", run_id) + return PullSimulationInputsResponse( + context=context_to_proto(serverapp_ctxt), + run=run_to_proto(run), + fab=fab_to_proto(fab), + ) + + # Raise an exception if the Run or Fab is not found, + # or if the status cannot be updated to STARTING + raise RuntimeError(f"Failed to start run {run_id}") + + def PushSimulationOutputs( + self, request: PushSimulationOutputsRequest, context: ServicerContext + ) -> PushSimulationOutputsResponse: + """Push Simulation process outputs.""" + log(DEBUG, "SimultionIoServicer.PushSimulationOutputs") + state = self.state_factory.state() + state.set_serverapp_context(request.run_id, context_from_proto(request.context)) + return PushSimulationOutputsResponse() + + def UpdateRunStatus( + self, request: UpdateRunStatusRequest, context: grpc.ServicerContext + ) -> UpdateRunStatusResponse: + """Update the status of a run.""" + log(DEBUG, "SimultionIoServicer.UpdateRunStatus") + state = self.state_factory.state() + + # Update the run status + state.update_run_status( + run_id=request.run_id, new_status=run_status_from_proto(request.run_status) + ) + return UpdateRunStatusResponse() + + def PushLogs( + self, request: PushLogsRequest, context: grpc.ServicerContext + ) -> PushLogsResponse: + """Push logs.""" + log(DEBUG, "SimultionIoServicer.PushLogs") + state = self.state_factory.state() + + # Add logs to LinkState + merged_logs = "".join(request.logs) + state.add_serverapp_log(request.run_id, merged_logs) + return 
PushLogsResponse() + + def GetFederationOptions( + self, request: GetFederationOptionsRequest, context: ServicerContext + ) -> GetFederationOptionsResponse: + """Get Federation Options associated with a run.""" + log(DEBUG, "SimultionIoServicer.GetFederationOptions") + state = self.state_factory.state() + + federation_options = state.get_federation_options(request.run_id) + if federation_options is None: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "Expected federation options to be set, but none available.", + ) + return GetFederationOptionsResponse() + return GetFederationOptionsResponse( + federation_options=configs_record_to_proto(federation_options) + ) diff --git a/src/py/flwr/server/superlink/state/utils.py b/src/py/flwr/server/superlink/state/utils.py deleted file mode 100644 index db44719c6a8a..000000000000 --- a/src/py/flwr/server/superlink/state/utils.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2024 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Utility functions for State.""" - - -import time -from logging import ERROR -from os import urandom -from uuid import uuid4 - -from flwr.common import log -from flwr.common.constant import ErrorCode -from flwr.proto.error_pb2 import Error # pylint: disable=E0611 -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 - -NODE_UNAVAILABLE_ERROR_REASON = ( - "Error: Node Unavailable - The destination node is currently unavailable. " - "It exceeds the time limit specified in its last ping." -) - - -def generate_rand_int_from_bytes(num_bytes: int) -> int: - """Generate a random unsigned integer from `num_bytes` bytes.""" - return int.from_bytes(urandom(num_bytes), "little", signed=False) - - -def convert_uint64_to_sint64(u: int) -> int: - """Convert a uint64 value to a sint64 value with the same bit sequence. - - Parameters - ---------- - u : int - The unsigned 64-bit integer to convert. - - Returns - ------- - int - The signed 64-bit integer equivalent. - - The signed 64-bit integer will have the same bit pattern as the - unsigned 64-bit integer but may have a different decimal value. - - For numbers within the range [0, `sint64` max value], the decimal - value remains the same. However, for numbers greater than the `sint64` - max value, the decimal value will differ due to the wraparound caused - by the sign bit. - """ - if u >= (1 << 63): - return u - (1 << 64) - return u - - -def convert_sint64_to_uint64(s: int) -> int: - """Convert a sint64 value to a uint64 value with the same bit sequence. - - Parameters - ---------- - s : int - The signed 64-bit integer to convert. - - Returns - ------- - int - The unsigned 64-bit integer equivalent. - - The unsigned 64-bit integer will have the same bit pattern as the - signed 64-bit integer but may have a different decimal value. 
- - For negative `sint64` values, the conversion adds 2^64 to the - signed value to obtain the equivalent `uint64` value. For non-negative - `sint64` values, the decimal value remains unchanged in the `uint64` - representation. - """ - if s < 0: - return s + (1 << 64) - return s - - -def convert_uint64_values_in_dict_to_sint64( - data_dict: dict[str, int], keys: list[str] -) -> None: - """Convert uint64 values to sint64 in the given dictionary. - - Parameters - ---------- - data_dict : dict[str, int] - A dictionary where the values are integers to be converted. - keys : list[str] - A list of keys in the dictionary whose values need to be converted. - """ - for key in keys: - if key in data_dict: - data_dict[key] = convert_uint64_to_sint64(data_dict[key]) - - -def convert_sint64_values_in_dict_to_uint64( - data_dict: dict[str, int], keys: list[str] -) -> None: - """Convert sint64 values to uint64 in the given dictionary. - - Parameters - ---------- - data_dict : dict[str, int] - A dictionary where the values are integers to be converted. - keys : list[str] - A list of keys in the dictionary whose values need to be converted. 
- """ - for key in keys: - if key in data_dict: - data_dict[key] = convert_sint64_to_uint64(data_dict[key]) - - -def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: - """Generate a TaskRes with a node unavailable error from a TaskIns.""" - current_time = time.time() - ttl = ref_taskins.task.ttl - (current_time - ref_taskins.task.created_at) - if ttl < 0: - log(ERROR, "Creating TaskRes for TaskIns that exceeds its TTL.") - ttl = 0 - return TaskRes( - task_id=str(uuid4()), - group_id=ref_taskins.group_id, - run_id=ref_taskins.run_id, - task=Task( - producer=Node(node_id=ref_taskins.task.consumer.node_id, anonymous=False), - consumer=Node(node_id=ref_taskins.task.producer.node_id, anonymous=False), - created_at=current_time, - ttl=ttl, - ancestry=[ref_taskins.task_id], - task_type=ref_taskins.task.task_type, - error=Error( - code=ErrorCode.NODE_UNAVAILABLE, reason=NODE_UNAVAILABLE_ERROR_REASON - ), - ), - ) diff --git a/src/py/flwr/simulation/__init__.py b/src/py/flwr/simulation/__init__.py index a171347b1507..d7277ddcdd46 100644 --- a/src/py/flwr/simulation/__init__.py +++ b/src/py/flwr/simulation/__init__.py @@ -17,12 +17,14 @@ import importlib +from flwr.simulation.app import run_simulation_process from flwr.simulation.run_simulation import run_simulation +from flwr.simulation.simulationio_connection import SimulationIoConnection is_ray_installed = importlib.util.find_spec("ray") is not None if is_ray_installed: - from flwr.simulation.app import start_simulation + from flwr.simulation.legacy_app import start_simulation else: RAY_IMPORT_ERROR: str = """Unable to import module `ray`. 
@@ -37,6 +39,8 @@ def start_simulation(*args, **kwargs): # type: ignore __all__ = [ + "SimulationIoConnection", "run_simulation", + "run_simulation_process", "start_simulation", ] diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index 0070d75c53dc..cecf24081458 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -12,371 +12,260 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower simulation app.""" +"""Flower Simulation process.""" - -import asyncio -import logging +import argparse import sys -import threading -import traceback -import warnings -from logging import ERROR, INFO -from typing import Any, Optional, Union - -import ray -from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy - -from flwr.client import ClientFnExt -from flwr.common import EventType, event -from flwr.common.constant import NODE_ID_NUM_BYTES -from flwr.common.logger import log, set_logger_propagation, warn_unsupported_feature -from flwr.server.client_manager import ClientManager -from flwr.server.history import History -from flwr.server.server import Server, init_defaults, run_fl -from flwr.server.server_config import ServerConfig -from flwr.server.strategy import Strategy -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes -from flwr.simulation.ray_transport.ray_actor import ( - ClientAppActor, - VirtualClientEngineActor, - VirtualClientEngineActorPool, - pool_size_from_resources, +from logging import DEBUG, ERROR, INFO +from queue import Queue +from time import sleep +from typing import Optional + +from flwr.cli.config_utils import get_fab_metadata +from flwr.cli.install import install_from_fab +from flwr.common import EventType +from flwr.common.args import add_args_flwr_app_common +from flwr.common.config import ( + get_flwr_dir, + 
get_fused_config_from_dir, + get_project_config, + get_project_dir, + unflatten_dict, ) -from flwr.simulation.ray_transport.ray_client_proxy import RayActorClientProxy - -INVALID_ARGUMENTS_START_SIMULATION = """ -INVALID ARGUMENTS ERROR - -Invalid Arguments in method: - -`start_simulation( - *, - client_fn: ClientFn, - num_clients: int, - clients_ids: Optional[List[str]] = None, - client_resources: Optional[Dict[str, float]] = None, - server: Optional[Server] = None, - config: ServerConfig = None, - strategy: Optional[Strategy] = None, - client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[Dict[str, Any]] = None, -) -> None:` - -REASON: - Method requires: - - Either `num_clients`[int] or `clients_ids`[List[str]] - to be set exclusively. - OR - - `len(clients_ids)` == `num_clients` - -""" - -NodeToPartitionMapping = dict[int, int] - - -def _create_node_id_to_partition_mapping( - num_clients: int, -) -> NodeToPartitionMapping: - """Generate a node_id:partition_id mapping.""" - nodes_mapping: NodeToPartitionMapping = {} # {node-id; partition-id} - for i in range(num_clients): - while True: - node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) - if node_id not in nodes_mapping: - break - nodes_mapping[node_id] = i - return nodes_mapping - - -# pylint: disable=too-many-arguments,too-many-statements,too-many-branches -def start_simulation( - *, - client_fn: ClientFnExt, - num_clients: int, - clients_ids: Optional[list[str]] = None, # UNSUPPORTED, WILL BE REMOVED - client_resources: Optional[dict[str, float]] = None, - server: Optional[Server] = None, - config: Optional[ServerConfig] = None, - strategy: Optional[Strategy] = None, - client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[dict[str, Any]] = None, - keep_initialised: Optional[bool] = False, - actor_type: type[VirtualClientEngineActor] = ClientAppActor, - actor_kwargs: Optional[dict[str, Any]] = None, - actor_scheduling: Union[str, NodeAffinitySchedulingStrategy] = 
"DEFAULT", -) -> History: - """Start a Ray-based Flower simulation server. - - Parameters - ---------- - client_fn : ClientFnExt - A function creating `Client` instances. The function must have the signature - `client_fn(context: Context). It should return - a single client instance of type `Client`. Note that the created client - instances are ephemeral and will often be destroyed after a single method - invocation. Since client instances are not long-lived, they should not attempt - to carry state over method invocations. Any state required by the instance - (model, dataset, hyperparameters, ...) should be (re-)created in either the - call to `client_fn` or the call to any of the client methods (e.g., load - evaluation data in the `evaluate` method itself). - num_clients : int - The total number of clients in this simulation. - clients_ids : Optional[List[str]] - UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. - List `client_id`s for each client. This is only required if - `num_clients` is not set. Setting both `num_clients` and `clients_ids` - with `len(clients_ids)` not equal to `num_clients` generates an error. - Using this argument will raise an error. - client_resources : Optional[Dict[str, float]] (default: `{"num_cpus": 1, "num_gpus": 0.0}`) - CPU and GPU resources for a single client. Supported keys - are `num_cpus` and `num_gpus`. To understand the GPU utilization caused by - `num_gpus`, as well as using custom resources, please consult the Ray - documentation. - server : Optional[flwr.server.Server] (default: None). - An implementation of the abstract base class `flwr.server.Server`. If no - instance is provided, then `start_server` will create one. - config: ServerConfig (default: None). - Currently supported values are `num_rounds` (int, default: 1) and - `round_timeout` in seconds (float, default: None). - strategy : Optional[flwr.server.Strategy] (default: None) - An implementation of the abstract base class `flwr.server.Strategy`. 
If - no strategy is provided, then `start_server` will use - `flwr.server.strategy.FedAvg`. - client_manager : Optional[flwr.server.ClientManager] (default: None) - An implementation of the abstract base class `flwr.server.ClientManager`. - If no implementation is provided, then `start_simulation` will use - `flwr.server.client_manager.SimpleClientManager`. - ray_init_args : Optional[Dict[str, Any]] (default: None) - Optional dictionary containing arguments for the call to `ray.init`. - If ray_init_args is None (the default), Ray will be initialized with - the following default args: - - { "ignore_reinit_error": True, "include_dashboard": False } - - An empty dictionary can be used (ray_init_args={}) to prevent any - arguments from being passed to ray.init. - keep_initialised: Optional[bool] (default: False) - Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`. - - actor_type: VirtualClientEngineActor (default: ClientAppActor) - Optionally specify the type of actor to use. The actor object, which - persists throughout the simulation, will be the process in charge of - executing a ClientApp wrapping input argument `client_fn`. - - actor_kwargs: Optional[Dict[str, Any]] (default: None) - If you want to create your own Actor classes, you might need to pass - some input argument. You can use this dictionary for such purpose. - - actor_scheduling: Optional[Union[str, NodeAffinitySchedulingStrategy]] - (default: "DEFAULT") - Optional string ("DEFAULT" or "SPREAD") for the VCE to choose in which - node the actor is placed. If you are an advanced user needed more control - you can use lower-level scheduling strategies to pin actors to specific - compute nodes (e.g. via NodeAffinitySchedulingStrategy). Please note this - is an advanced feature. 
For all details, please refer to the Ray documentation: - https://docs.ray.io/en/latest/ray-core/scheduling/index.html - - Returns - ------- - hist : flwr.server.history.History - Object containing metrics from training. - """ # noqa: E501 - # pylint: disable-msg=too-many-locals - event( - EventType.START_SIMULATION_ENTER, - {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, - ) - - if clients_ids is not None: - warn_unsupported_feature( - "Passing `clients_ids` to `start_simulation` is deprecated and not longer " - "used by `start_simulation`. Use `num_clients` exclusively instead." - ) - log(ERROR, "`clients_ids` argument used.") - sys.exit() - - # Set logger propagation - loop: Optional[asyncio.AbstractEventLoop] = None - try: - loop = asyncio.get_running_loop() - except RuntimeError: - loop = None - finally: - if loop and loop.is_running(): - # Set logger propagation to False to prevent duplicated log output in Colab. - logger = logging.getLogger("flwr") - _ = set_logger_propagation(logger, False) - - # Initialize server and server config - initialized_server, initialized_config = init_defaults( - server=server, - config=config, - strategy=strategy, - client_manager=client_manager, - ) - - log( - INFO, - "Starting Flower simulation, config: %s", - initialized_config, - ) +from flwr.common.constant import ( + SIMULATIONIO_API_DEFAULT_CLIENT_ADDRESS, + Status, + SubStatus, +) +from flwr.common.logger import ( + log, + mirror_output_to_queue, + restore_output, + start_log_uploader, + stop_log_uploader, +) +from flwr.common.serde import ( + configs_record_from_proto, + context_from_proto, + fab_from_proto, + run_from_proto, + run_status_to_proto, +) +from flwr.common.typing import RunStatus +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + GetFederationOptionsRequest, + GetFederationOptionsResponse, + UpdateRunStatusRequest, +) +from flwr.proto.simulationio_pb2 import ( # pylint: disable=E0611 + PullSimulationInputsRequest, + 
PullSimulationInputsResponse, + PushSimulationOutputsRequest, +) +from flwr.server.superlink.fleet.vce.backend.backend import BackendConfig +from flwr.simulation.run_simulation import _run_simulation +from flwr.simulation.simulationio_connection import SimulationIoConnection - # Create node-id to partition-id mapping - nodes_mapping = _create_node_id_to_partition_mapping(num_clients) - # Default arguments for Ray initialization - if not ray_init_args: - ray_init_args = { - "ignore_reinit_error": True, - "include_dashboard": False, - } +def flwr_simulation() -> None: + """Run process-isolated Flower Simulation.""" + # Capture stdout/stderr + log_queue: Queue[Optional[str]] = Queue() + mirror_output_to_queue(log_queue) - # Shut down Ray if it has already been initialized (unless asked not to) - if ray.is_initialized() and not keep_initialised: - ray.shutdown() + args = _parse_args_run_flwr_simulation().parse_args() - # Initialize Ray - ray.init(**ray_init_args) - cluster_resources = ray.cluster_resources() - log( - INFO, - "Flower VCE: Ray initialized with resources: %s", - cluster_resources, - ) + log(INFO, "Starting Flower Simulation") - log( - INFO, - "Optimize your simulation with Flower VCE: " - "https://flower.ai/docs/framework/how-to-run-simulations.html", - ) - - # Log the resources that a single client will be able to use - if client_resources is None: + if not args.insecure: log( - INFO, - "No `client_resources` specified. Using minimal resources for clients.", - ) - client_resources = {"num_cpus": 1, "num_gpus": 0.0} - - # Each client needs at the very least one CPU - if "num_cpus" not in client_resources: - warnings.warn( - "No `num_cpus` specified in `client_resources`. " - "Using `num_cpus=1` for each client.", - stacklevel=2, + ERROR, + "`flwr-simulation` does not support TLS yet. 
" + "Please use the '--insecure' flag.", ) - client_resources["num_cpus"] = 1 + sys.exit(1) log( - INFO, - "Flower VCE: Resources for each Virtual Client: %s", - client_resources, + DEBUG, + "Starting isolated `Simulation` connected to SuperLink SimulationAppIo API " + "at %s", + args.simulationio_api_address, + ) + run_simulation_process( + simulationio_api_address=args.simulationio_api_address, + log_queue=log_queue, + run_once=args.run_once, + flwr_dir_=args.flwr_dir, + certificates=None, ) - actor_args = {} if actor_kwargs is None else actor_kwargs - - # An actor factory. This is called N times to add N actors - # to the pool. If at some point the pool can accommodate more actors - # this will be called again. - def create_actor_fn() -> type[VirtualClientEngineActor]: - return actor_type.options( # type: ignore - **client_resources, - scheduling_strategy=actor_scheduling, - ).remote(**actor_args) - - # Instantiate ActorPool - pool = VirtualClientEngineActorPool( - create_actor_fn=create_actor_fn, - client_resources=client_resources, + # Restore stdout/stderr + restore_output() + + +def run_simulation_process( # pylint: disable=R0914, disable=W0212, disable=R0915 + simulationio_api_address: str, + log_queue: Queue[Optional[str]], + run_once: bool, + flwr_dir_: Optional[str] = None, + certificates: Optional[bytes] = None, +) -> None: + """Run Flower Simulation process.""" + conn = SimulationIoConnection( + simulationio_service_address=simulationio_api_address, + root_certificates=certificates, ) - f_stop = threading.Event() - - # Periodically, check if the cluster has grown (i.e. a new - # node has been added). If this happens, we likely want to grow - # the actor pool by adding more Actors to it. - def update_resources(f_stop: threading.Event) -> None: - """Periodically check if more actors can be added to the pool. - - If so, extend the pool. 
- """ - if not f_stop.is_set(): - num_max_actors = pool_size_from_resources(client_resources) - if num_max_actors > pool.num_actors: - num_new = num_max_actors - pool.num_actors - log( - INFO, "The cluster expanded. Adding %s actors to the pool.", num_new + # Resolve directory where FABs are installed + flwr_dir = get_flwr_dir(flwr_dir_) + log_uploader = None + + while True: + + try: + # Pull SimulationInputs from LinkState + req = PullSimulationInputsRequest() + res: PullSimulationInputsResponse = conn._stub.PullSimulationInputs(req) + if not res.HasField("run"): + sleep(3) + run_status = None + continue + + context = context_from_proto(res.context) + run = run_from_proto(res.run) + fab = fab_from_proto(res.fab) + + # Start log uploader for this run + log_uploader = start_log_uploader( + log_queue=log_queue, + node_id=context.node_id, + run_id=run.run_id, + stub=conn._stub, + ) + + log(DEBUG, "Simulation process starts FAB installation.") + install_from_fab(fab.content, flwr_dir=flwr_dir, skip_prompt=True) + + fab_id, fab_version = get_fab_metadata(fab.content) + + app_path = get_project_dir(fab_id, fab_version, fab.hash_str, flwr_dir) + config = get_project_config(app_path) + + # Get ClientApp and SeverApp components + app_components = config["tool"]["flwr"]["app"]["components"] + client_app_attr = app_components["clientapp"] + server_app_attr = app_components["serverapp"] + fused_config = get_fused_config_from_dir(app_path, run.override_config) + + # Update run_config in context + context.run_config = fused_config + + log( + DEBUG, + "Flower will load ServerApp `%s` in %s", + server_app_attr, + app_path, + ) + log( + DEBUG, + "Flower will load ClientApp `%s` in %s", + client_app_attr, + app_path, + ) + + # Change status to Running + run_status_proto = run_status_to_proto(RunStatus(Status.RUNNING, "", "")) + conn._stub.UpdateRunStatus( + UpdateRunStatusRequest(run_id=run.run_id, run_status=run_status_proto) + ) + + # Pull Federation Options + fed_opt_res: 
GetFederationOptionsResponse = conn._stub.GetFederationOptions( + GetFederationOptionsRequest(run_id=run.run_id) + ) + federation_options = configs_record_from_proto( + fed_opt_res.federation_options + ) + + # Unflatten underlying dict + fed_opt = unflatten_dict({**federation_options}) + + # Extract configs values of interest + num_supernodes = fed_opt.get("num-supernodes") + if num_supernodes is None: + raise ValueError( + "Federation options expects `num-supernodes` to be set." + ) + backend_config: BackendConfig = fed_opt.get("backend", {}) + verbose: bool = fed_opt.get("verbose", False) + enable_tf_gpu_growth: bool = fed_opt.get("enable_tf_gpu_growth", False) + + # Launch the simulation + _run_simulation( + server_app_attr=server_app_attr, + client_app_attr=client_app_attr, + num_supernodes=num_supernodes, + backend_config=backend_config, + app_dir=str(app_path), + run=run, + enable_tf_gpu_growth=enable_tf_gpu_growth, + verbose_logging=verbose, + server_app_run_config=fused_config, + is_app=True, + exit_event=EventType.CLI_FLOWER_SIMULATION_LEAVE, + ) + + # Send resulting context + context_proto = None # context_to_proto(updated_context) + out_req = PushSimulationOutputsRequest( + run_id=run.run_id, context=context_proto + ) + _ = conn._stub.PushSimulationOutputs(out_req) + + run_status = RunStatus(Status.FINISHED, SubStatus.COMPLETED, "") + + except Exception as ex: # pylint: disable=broad-exception-caught + exc_entity = "Simulation" + log(ERROR, "%s raised an exception", exc_entity, exc_info=ex) + run_status = RunStatus(Status.FINISHED, SubStatus.FAILED, str(ex)) + + finally: + # Stop log uploader for this run and upload final logs + if log_uploader: + stop_log_uploader(log_queue, log_uploader) + log_uploader = None + + # Update run status + if run_status: + run_status_proto = run_status_to_proto(run_status) + conn._stub.UpdateRunStatus( + UpdateRunStatusRequest( + run_id=run.run_id, run_status=run_status_proto + ) ) - 
pool.add_actors_to_pool(num_actors=num_new) - threading.Timer(10, update_resources, [f_stop]).start() + # Stop the loop if `flwr-simulation` is expected to process a single run + if run_once: + break - update_resources(f_stop) - log( - INFO, - "Flower VCE: Creating %s with %s actors", - pool.__class__.__name__, - pool.num_actors, +def _parse_args_run_flwr_simulation() -> argparse.ArgumentParser: + """Parse flwr-simulation command line arguments.""" + parser = argparse.ArgumentParser( + description="Run a Flower Simulation", ) - - # Register one RayClientProxy object for each client with the ClientManager - for node_id, partition_id in nodes_mapping.items(): - client_proxy = RayActorClientProxy( - client_fn=client_fn, - node_id=node_id, - partition_id=partition_id, - num_partitions=num_clients, - actor_pool=pool, - ) - initialized_server.client_manager().register(client=client_proxy) - - hist = History() - # pylint: disable=broad-except - try: - # Start training - hist = run_fl( - server=initialized_server, - config=initialized_config, - ) - except Exception as ex: - log(ERROR, ex) - log(ERROR, traceback.format_exc()) - log( - ERROR, - "Your simulation crashed :(. This could be because of several reasons. " - "The most common are: " - "\n\t > Sometimes, issues in the simulation code itself can cause crashes. " - "It's always a good idea to double-check your code for any potential bugs " - "or inconsistencies that might be contributing to the problem. " - "For example: " - "\n\t\t - You might be using a class attribute in your clients that " - "hasn't been defined." - "\n\t\t - There could be an incorrect method call to a 3rd party library " - "(e.g., PyTorch)." - "\n\t\t - The return types of methods in your clients/strategies might be " - "incorrect." - "\n\t > Your system couldn't fit a single VirtualClient: try lowering " - "`client_resources`." - "\n\t > All the actors in your pool crashed. 
This could be because: " - "\n\t\t - You clients hit an out-of-memory (OOM) error and actors couldn't " - "recover from it. Try launching your simulation with more generous " - "`client_resources` setting (i.e. it seems %s is " - "not enough for your run). Use fewer concurrent actors. " - "\n\t\t - You were running a multi-node simulation and all worker nodes " - "disconnected. The head node might still be alive but cannot accommodate " - "any actor with resources: %s." - "\nTake a look at the Flower simulation examples for guidance " - ".", - client_resources, - client_resources, - ) - raise RuntimeError("Simulation crashed.") from ex - - finally: - # Stop time monitoring resources in cluster - f_stop.set() - event(EventType.START_SIMULATION_LEAVE) - - return hist + parser.add_argument( + "--simulationio-api-address", + default=SIMULATIONIO_API_DEFAULT_CLIENT_ADDRESS, + type=str, + help="Address of SuperLink's SimulationIO API (IPv4, IPv6, or a domain name)." + f"By default, it is set to {SIMULATIONIO_API_DEFAULT_CLIENT_ADDRESS}.", + ) + parser.add_argument( + "--run-once", + action="store_true", + help="When set, this process will start a single simulation " + "for a pending Run. If no pending run the process will exit. ", + ) + add_args_flwr_app_common(parser=parser) + return parser diff --git a/src/py/flwr/simulation/legacy_app.py b/src/py/flwr/simulation/legacy_app.py new file mode 100644 index 000000000000..ce69692008d0 --- /dev/null +++ b/src/py/flwr/simulation/legacy_app.py @@ -0,0 +1,402 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower simulation app.""" + + +import asyncio +import logging +import sys +import threading +import traceback +import warnings +from logging import ERROR, INFO +from typing import Any, Optional, Union + +import ray +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy + +from flwr.client import ClientFnExt +from flwr.common import EventType, event +from flwr.common.constant import NODE_ID_NUM_BYTES +from flwr.common.logger import ( + log, + set_logger_propagation, + warn_deprecated_feature, + warn_unsupported_feature, +) +from flwr.server.client_manager import ClientManager +from flwr.server.history import History +from flwr.server.server import Server, init_defaults, run_fl +from flwr.server.server_config import ServerConfig +from flwr.server.strategy import Strategy +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes +from flwr.simulation.ray_transport.ray_actor import ( + ClientAppActor, + VirtualClientEngineActor, + VirtualClientEngineActorPool, + pool_size_from_resources, +) +from flwr.simulation.ray_transport.ray_client_proxy import RayActorClientProxy + +INVALID_ARGUMENTS_START_SIMULATION = """ +INVALID ARGUMENTS ERROR + +Invalid Arguments in method: + +`start_simulation( + *, + client_fn: ClientFn, + num_clients: int, + clients_ids: Optional[List[str]] = None, + client_resources: Optional[Dict[str, float]] = None, + server: Optional[Server] = None, + config: ServerConfig = None, + strategy: Optional[Strategy] = None, 
+ client_manager: Optional[ClientManager] = None, + ray_init_args: Optional[Dict[str, Any]] = None, +) -> None:` + +REASON: + Method requires: + - Either `num_clients`[int] or `clients_ids`[List[str]] + to be set exclusively. + OR + - `len(clients_ids)` == `num_clients` + +""" + +NodeToPartitionMapping = dict[int, int] + + +def _create_node_id_to_partition_mapping( + num_clients: int, +) -> NodeToPartitionMapping: + """Generate a node_id:partition_id mapping.""" + nodes_mapping: NodeToPartitionMapping = {} # {node-id; partition-id} + for i in range(num_clients): + while True: + node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + if node_id not in nodes_mapping: + break + nodes_mapping[node_id] = i + return nodes_mapping + + +# pylint: disable=too-many-arguments,too-many-statements,too-many-branches +def start_simulation( + *, + client_fn: ClientFnExt, + num_clients: int, + clients_ids: Optional[list[str]] = None, # UNSUPPORTED, WILL BE REMOVED + client_resources: Optional[dict[str, float]] = None, + server: Optional[Server] = None, + config: Optional[ServerConfig] = None, + strategy: Optional[Strategy] = None, + client_manager: Optional[ClientManager] = None, + ray_init_args: Optional[dict[str, Any]] = None, + keep_initialised: Optional[bool] = False, + actor_type: type[VirtualClientEngineActor] = ClientAppActor, + actor_kwargs: Optional[dict[str, Any]] = None, + actor_scheduling: Union[str, NodeAffinitySchedulingStrategy] = "DEFAULT", +) -> History: + """Start a Ray-based Flower simulation server. + + Warning + ------- + This function is deprecated since 1.13.0. Use :code: `flwr run` to start a Flower + simulation. + + Parameters + ---------- + client_fn : ClientFnExt + A function creating `Client` instances. The function must have the signature + `client_fn(context: Context). It should return + a single client instance of type `Client`. Note that the created client + instances are ephemeral and will often be destroyed after a single method + invocation. 
Since client instances are not long-lived, they should not attempt + to carry state over method invocations. Any state required by the instance + (model, dataset, hyperparameters, ...) should be (re-)created in either the + call to `client_fn` or the call to any of the client methods (e.g., load + evaluation data in the `evaluate` method itself). + num_clients : int + The total number of clients in this simulation. + clients_ids : Optional[List[str]] + UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. + List `client_id`s for each client. This is only required if + `num_clients` is not set. Setting both `num_clients` and `clients_ids` + with `len(clients_ids)` not equal to `num_clients` generates an error. + Using this argument will raise an error. + client_resources : Optional[Dict[str, float]] (default: `{"num_cpus": 1, "num_gpus": 0.0}`) + CPU and GPU resources for a single client. Supported keys + are `num_cpus` and `num_gpus`. To understand the GPU utilization caused by + `num_gpus`, as well as using custom resources, please consult the Ray + documentation. + server : Optional[flwr.server.Server] (default: None). + An implementation of the abstract base class `flwr.server.Server`. If no + instance is provided, then `start_server` will create one. + config: ServerConfig (default: None). + Currently supported values are `num_rounds` (int, default: 1) and + `round_timeout` in seconds (float, default: None). + strategy : Optional[flwr.server.Strategy] (default: None) + An implementation of the abstract base class `flwr.server.Strategy`. If + no strategy is provided, then `start_server` will use + `flwr.server.strategy.FedAvg`. + client_manager : Optional[flwr.server.ClientManager] (default: None) + An implementation of the abstract base class `flwr.server.ClientManager`. + If no implementation is provided, then `start_simulation` will use + `flwr.server.client_manager.SimpleClientManager`. 
+ ray_init_args : Optional[Dict[str, Any]] (default: None) + Optional dictionary containing arguments for the call to `ray.init`. + If ray_init_args is None (the default), Ray will be initialized with + the following default args: + + { "ignore_reinit_error": True, "include_dashboard": False } + + An empty dictionary can be used (ray_init_args={}) to prevent any + arguments from being passed to ray.init. + keep_initialised: Optional[bool] (default: False) + Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`. + + actor_type: VirtualClientEngineActor (default: ClientAppActor) + Optionally specify the type of actor to use. The actor object, which + persists throughout the simulation, will be the process in charge of + executing a ClientApp wrapping input argument `client_fn`. + + actor_kwargs: Optional[Dict[str, Any]] (default: None) + If you want to create your own Actor classes, you might need to pass + some input argument. You can use this dictionary for such purpose. + + actor_scheduling: Optional[Union[str, NodeAffinitySchedulingStrategy]] + (default: "DEFAULT") + Optional string ("DEFAULT" or "SPREAD") for the VCE to choose in which + node the actor is placed. If you are an advanced user needed more control + you can use lower-level scheduling strategies to pin actors to specific + compute nodes (e.g. via NodeAffinitySchedulingStrategy). Please note this + is an advanced feature. For all details, please refer to the Ray documentation: + https://docs.ray.io/en/latest/ray-core/scheduling/index.html + + Returns + ------- + hist : flwr.server.history.History + Object containing metrics from training. + """ # noqa: E501 + # pylint: disable-msg=too-many-locals + msg = ( + "flwr.simulation.start_simulation() is deprecated." 
+ "\n\tInstead, use the `flwr run` CLI command to start a local simulation " + "in your Flower app, as shown for example below:" + "\n\n\t\t$ flwr new # Create a new Flower app from a template" + "\n\n\t\t$ flwr run # Run the Flower app in Simulation Mode" + "\n\n\tUsing `start_simulation()` is deprecated." + ) + warn_deprecated_feature(name=msg) + + event( + EventType.START_SIMULATION_ENTER, + {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, + ) + + if clients_ids is not None: + warn_unsupported_feature( + "Passing `clients_ids` to `start_simulation` is deprecated and not longer " + "used by `start_simulation`. Use `num_clients` exclusively instead." + ) + log(ERROR, "`clients_ids` argument used.") + sys.exit() + + # Set logger propagation + loop: Optional[asyncio.AbstractEventLoop] = None + try: + loop = asyncio.get_running_loop() + except RuntimeError: + loop = None + finally: + if loop and loop.is_running(): + # Set logger propagation to False to prevent duplicated log output in Colab. 
+ logger = logging.getLogger("flwr") + _ = set_logger_propagation(logger, False) + + # Initialize server and server config + initialized_server, initialized_config = init_defaults( + server=server, + config=config, + strategy=strategy, + client_manager=client_manager, + ) + + log( + INFO, + "Starting Flower simulation, config: %s", + initialized_config, + ) + + # Create node-id to partition-id mapping + nodes_mapping = _create_node_id_to_partition_mapping(num_clients) + + # Default arguments for Ray initialization + if not ray_init_args: + ray_init_args = { + "ignore_reinit_error": True, + "include_dashboard": False, + } + + # Shut down Ray if it has already been initialized (unless asked not to) + if ray.is_initialized() and not keep_initialised: + ray.shutdown() + + # Initialize Ray + ray.init(**ray_init_args) + cluster_resources = ray.cluster_resources() + log( + INFO, + "Flower VCE: Ray initialized with resources: %s", + cluster_resources, + ) + + log( + INFO, + "Optimize your simulation with Flower VCE: " + "https://flower.ai/docs/framework/how-to-run-simulations.html", + ) + + # Log the resources that a single client will be able to use + if client_resources is None: + log( + INFO, + "No `client_resources` specified. Using minimal resources for clients.", + ) + client_resources = {"num_cpus": 1, "num_gpus": 0.0} + + # Each client needs at the very least one CPU + if "num_cpus" not in client_resources: + warnings.warn( + "No `num_cpus` specified in `client_resources`. " + "Using `num_cpus=1` for each client.", + stacklevel=2, + ) + client_resources["num_cpus"] = 1 + + log( + INFO, + "Flower VCE: Resources for each Virtual Client: %s", + client_resources, + ) + + actor_args = {} if actor_kwargs is None else actor_kwargs + + # An actor factory. This is called N times to add N actors + # to the pool. If at some point the pool can accommodate more actors + # this will be called again. 
+ def create_actor_fn() -> type[VirtualClientEngineActor]: + return actor_type.options( # type: ignore + **client_resources, + scheduling_strategy=actor_scheduling, + ).remote(**actor_args) + + # Instantiate ActorPool + pool = VirtualClientEngineActorPool( + create_actor_fn=create_actor_fn, + client_resources=client_resources, + ) + + f_stop = threading.Event() + + # Periodically, check if the cluster has grown (i.e. a new + # node has been added). If this happens, we likely want to grow + # the actor pool by adding more Actors to it. + def update_resources(f_stop: threading.Event) -> None: + """Periodically check if more actors can be added to the pool. + + If so, extend the pool. + """ + if not f_stop.is_set(): + num_max_actors = pool_size_from_resources(client_resources) + if num_max_actors > pool.num_actors: + num_new = num_max_actors - pool.num_actors + log( + INFO, "The cluster expanded. Adding %s actors to the pool.", num_new + ) + pool.add_actors_to_pool(num_actors=num_new) + + threading.Timer(10, update_resources, [f_stop]).start() + + update_resources(f_stop) + + log( + INFO, + "Flower VCE: Creating %s with %s actors", + pool.__class__.__name__, + pool.num_actors, + ) + + # Register one RayClientProxy object for each client with the ClientManager + for node_id, partition_id in nodes_mapping.items(): + client_proxy = RayActorClientProxy( + client_fn=client_fn, + node_id=node_id, + partition_id=partition_id, + num_partitions=num_clients, + actor_pool=pool, + ) + initialized_server.client_manager().register(client=client_proxy) + + hist = History() + # pylint: disable=broad-except + try: + # Start training + hist = run_fl( + server=initialized_server, + config=initialized_config, + ) + except Exception as ex: + log(ERROR, ex) + log(ERROR, traceback.format_exc()) + log( + ERROR, + "Your simulation crashed :(. This could be because of several reasons. " + "The most common are: " + "\n\t > Sometimes, issues in the simulation code itself can cause crashes. 
" + "It's always a good idea to double-check your code for any potential bugs " + "or inconsistencies that might be contributing to the problem. " + "For example: " + "\n\t\t - You might be using a class attribute in your clients that " + "hasn't been defined." + "\n\t\t - There could be an incorrect method call to a 3rd party library " + "(e.g., PyTorch)." + "\n\t\t - The return types of methods in your clients/strategies might be " + "incorrect." + "\n\t > Your system couldn't fit a single VirtualClient: try lowering " + "`client_resources`." + "\n\t > All the actors in your pool crashed. This could be because: " + "\n\t\t - You clients hit an out-of-memory (OOM) error and actors couldn't " + "recover from it. Try launching your simulation with more generous " + "`client_resources` setting (i.e. it seems %s is " + "not enough for your run). Use fewer concurrent actors. " + "\n\t\t - You were running a multi-node simulation and all worker nodes " + "disconnected. The head node might still be alive but cannot accommodate " + "any actor with resources: %s." 
+ "\nTake a look at the Flower simulation examples for guidance " + ".", + client_resources, + client_resources, + ) + raise RuntimeError("Simulation crashed.") from ex + + finally: + # Stop time monitoring resources in cluster + f_stop.set() + event(EventType.START_SIMULATION_LEAVE) + + return hist diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index ad9be6bd1fc0..a5d4b27d3e5a 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -22,7 +22,7 @@ from flwr import common from flwr.client import ClientFnExt from flwr.client.client_app import ClientApp -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.constant import ( NUM_PARTITIONS_KEY, @@ -65,7 +65,7 @@ def _load_app() -> ClientApp: self.app_fn = _load_app self.actor_pool = actor_pool - self.proxy_state = NodeState( + self.proxy_state = DeprecatedRunInfoStore( node_id=node_id, node_config={ PARTITION_ID_KEY: str(partition_id), diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index ce0ef46d135f..af8c6137bb08 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -22,7 +22,7 @@ from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import ( DEFAULT_TTL, Config, @@ -40,7 +40,7 @@ recordset_to_getpropertiesres, ) from flwr.common.recordset_compat_test import _get_valid_getpropertiesins -from flwr.simulation.app import ( +from flwr.simulation.legacy_app import ( NodeToPartitionMapping, 
_create_node_id_to_partition_mapping, ) @@ -142,7 +142,7 @@ def test_cid_consistency_all_submit_first_run_consistency() -> None: """Test that ClientProxies get the result of client job they submit. All jobs are submitted at the same time. Then fetched one at a time. This also tests - NodeState (at each Proxy) and RunState basic functionality. + DeprecatedRunInfoStore (at each Proxy) and RunState basic functionality. """ proxies, _, _ = prep() run_id = 0 @@ -193,10 +193,10 @@ def test_cid_consistency_without_proxies() -> None: _, pool, mapping = prep() node_ids = list(mapping.keys()) - # register node states - node_states: dict[int, NodeState] = {} + # register DeprecatedRunInfoStores + node_info_stores: dict[int, DeprecatedRunInfoStore] = {} for node_id, partition_id in mapping.items(): - node_states[node_id] = NodeState( + node_info_stores[node_id] = DeprecatedRunInfoStore( node_id=node_id, node_config={ PARTITION_ID_KEY: str(partition_id), @@ -228,8 +228,8 @@ def _load_app() -> ClientApp: ), ) # register and retrieve context - node_states[node_id].register_context(run_id=run_id) - context = node_states[node_id].retrieve_context(run_id=run_id) + node_info_stores[node_id].register_context(run_id=run_id) + context = node_info_stores[node_id].retrieve_context(run_id=run_id) partition_id_str = str(context.node_config[PARTITION_ID_KEY]) pool.submit_client_job( lambda a, c_fn, j_fn, nid_, state: a.run.remote(c_fn, j_fn, nid_, state), diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index 8c4e42c34744..c70f4b59a534 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -21,7 +21,6 @@ import sys import threading import traceback -from argparse import Namespace from logging import DEBUG, ERROR, INFO, WARNING from pathlib import Path from time import sleep @@ -29,69 +28,28 @@ from flwr.cli.config_utils import load_and_validate from flwr.client import ClientApp -from flwr.common 
import EventType, event, log +from flwr.common import Context, EventType, RecordSet, event, log, now from flwr.common.config import get_fused_config_from_dir, parse_config_args -from flwr.common.constant import RUN_ID_NUM_BYTES +from flwr.common.constant import RUN_ID_NUM_BYTES, Status from flwr.common.logger import ( set_logger_propagation, update_console_handler, - warn_deprecated_feature, warn_deprecated_feature_with_example, ) -from flwr.common.typing import Run, UserConfig +from flwr.common.typing import Run, RunStatus, UserConfig from flwr.server.driver import Driver, InMemoryDriver -from flwr.server.run_serverapp import run as run_server_app +from flwr.server.run_serverapp import run as _run from flwr.server.server_app import ServerApp from flwr.server.superlink.fleet import vce from flwr.server.superlink.fleet.vce.backend.backend import BackendConfig -from flwr.server.superlink.state import StateFactory -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate import LinkStateFactory +from flwr.server.superlink.linkstate.in_memory_linkstate import RunRecord +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from flwr.simulation.ray_transport.utils import ( enable_tf_gpu_growth as enable_gpu_growth, ) -def _check_args_do_not_interfere(args: Namespace) -> bool: - """Ensure decoupling of flags for different ways to start the simulation.""" - mode_one_args = ["app", "run_config"] - mode_two_args = ["client_app", "server_app"] - - def _resolve_message(conflict_keys: list[str]) -> str: - return ",".join([f"`--{key}`".replace("_", "-") for key in conflict_keys]) - - # When passing `--app`, `--app-dir` is ignored - if args.app and args.app_dir: - log(ERROR, "Either `--app` or `--app-dir` can be set, but not both.") - return False - - if any(getattr(args, key) for key in mode_one_args): - if any(getattr(args, key) for key in mode_two_args): - log( - ERROR, - "Passing any of {%s} 
alongside with any of {%s}", - _resolve_message(mode_one_args), - _resolve_message(mode_two_args), - ) - return False - - if not args.app: - log(ERROR, "You need to pass --app") - return False - - return True - - # Ensure all args are set (required for the non-FAB mode of execution) - if not all(getattr(args, key) for key in mode_two_args): - log( - ERROR, - "Passing all of %s keys are required.", - _resolve_message(mode_two_args), - ) - return False - - return True - - def _replace_keys(d: Any, match: str, target: str) -> Any: if isinstance(d, dict): return { @@ -114,19 +72,6 @@ def run_simulation_from_cli() -> None: event_details={"backend": args.backend, "num-supernodes": args.num_supernodes}, ) - # Add warnings for deprecated server_app and client_app arguments - if args.server_app: - warn_deprecated_feature( - "The `--server-app` argument is deprecated. " - "Please use the `--app` argument instead." - ) - - if args.client_app: - warn_deprecated_feature( - "The `--client-app` argument is deprecated. " - "Use the `--app` argument instead." 
- ) - if args.enable_tf_gpu_growth: warn_deprecated_feature_with_example( "Passing `--enable-tf-gpu-growth` is deprecated.", @@ -143,69 +88,43 @@ def run_simulation_from_cli() -> None: backend_config_dict = _replace_keys(backend_config_dict, match="-", target="_") log(DEBUG, "backend_config_dict: %s", backend_config_dict) - # We are supporting two modes for the CLI entrypoint: - # 1) Running an app dir containing a `pyproject.toml` - # 2) Running any ClientApp and SeverApp w/o pyproject.toml being present - # For 2), some CLI args are compulsory, but they are not required for 1) - # We first do these checks - args_check_pass = _check_args_do_not_interfere(args) - if not args_check_pass: - sys.exit("Simulation Engine cannot start.") - run_id = ( generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) if args.run_id is None else args.run_id ) - if args.app: - # Mode 1 - app_path = Path(args.app) - if not app_path.is_dir(): - log(ERROR, "--app is not a directory") - sys.exit("Simulation Engine cannot start.") - - # Load pyproject.toml - config, errors, warnings = load_and_validate( - app_path / "pyproject.toml", check_module=False - ) - if errors: - raise ValueError(errors) - if warnings: - log(WARNING, warnings) + app_path = Path(args.app) + if not app_path.is_dir(): + log(ERROR, "--app is not a directory") + sys.exit("Simulation Engine cannot start.") - if config is None: - raise ValueError("Config extracted from FAB's pyproject.toml is not valid") + # Load pyproject.toml + config, errors, warnings = load_and_validate( + app_path / "pyproject.toml", check_module=False + ) + if errors: + raise ValueError(errors) - # Get ClientApp and SeverApp components - app_components = config["tool"]["flwr"]["app"]["components"] - client_app_attr = app_components["clientapp"] - server_app_attr = app_components["serverapp"] + if warnings: + log(WARNING, warnings) - override_config = parse_config_args( - [args.run_config] if args.run_config else args.run_config - ) - fused_config = 
get_fused_config_from_dir(app_path, override_config) - app_dir = args.app - is_app = True + if config is None: + raise ValueError("Config extracted from FAB's pyproject.toml is not valid") - else: - # Mode 2 - client_app_attr = args.client_app - server_app_attr = args.server_app - override_config = {} - fused_config = None - app_dir = args.app_dir - is_app = False + # Get ClientApp and SeverApp components + app_components = config["tool"]["flwr"]["app"]["components"] + client_app_attr = app_components["clientapp"] + server_app_attr = app_components["serverapp"] - # Create run - run = Run( - run_id=run_id, - fab_id="", - fab_version="", - fab_hash="", - override_config=override_config, + override_config = parse_config_args( + [args.run_config] if args.run_config else args.run_config ) + fused_config = get_fused_config_from_dir(app_path, override_config) + + # Create run + run = Run.create_empty(run_id) + run.override_config = override_config _run_simulation( server_app_attr=server_app_attr, @@ -213,13 +132,13 @@ def run_simulation_from_cli() -> None: num_supernodes=args.num_supernodes, backend_name=args.backend, backend_config=backend_config_dict, - app_dir=app_dir, + app_dir=args.app, run=run, enable_tf_gpu_growth=args.enable_tf_gpu_growth, delay_start=args.delay_start, verbose_logging=args.verbose, server_app_run_config=fused_config, - is_app=is_app, + is_app=True, exit_event=EventType.CLI_FLOWER_SIMULATION_LEAVE, ) @@ -310,6 +229,7 @@ def run_serverapp_th( f_stop: threading.Event, has_exception: threading.Event, enable_tf_gpu_growth: bool, + run_id: int, ) -> threading.Thread: """Run SeverApp in a thread.""" @@ -332,11 +252,20 @@ def server_th_with_start_checks( log(INFO, "Enabling GPU growth for Tensorflow on the server thread.") enable_gpu_growth() + # Initialize Context + context = Context( + run_id=run_id, + node_id=0, + node_config={}, + state=RecordSet(), + run_config=_server_app_run_config, + ) + # Run ServerApp - run_server_app( + _run( driver=_driver, + 
context=context, server_app_dir=_server_app_dir, - server_app_run_config=_server_app_run_config, server_app_attr=_server_app_attr, loaded_server_app=_server_app, ) @@ -389,7 +318,7 @@ def _main_loop( ) -> None: """Start ServerApp on a separate thread, then launch Simulation Engine.""" # Initialize StateFactory - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") f_stop = threading.Event() # A Threading event to indicate if an exception was raised in the ServerApp thread @@ -399,13 +328,17 @@ def _main_loop( try: # Register run log(DEBUG, "Pre-registering run with id %s", run.run_id) - state_factory.state().run_ids[run.run_id] = run # type: ignore + run.status = RunStatus(Status.RUNNING, "", "") + run.starting_at = now().isoformat() + run.running_at = run.starting_at + state_factory.state().run_ids[run.run_id] = RunRecord(run=run) # type: ignore if server_app_run_config is None: server_app_run_config = {} # Initialize Driver - driver = InMemoryDriver(run_id=run.run_id, state_factory=state_factory) + driver = InMemoryDriver(state_factory=state_factory) + driver.set_run(run_id=run.run_id) # Get and run ServerApp thread serverapp_th = run_serverapp_th( @@ -417,6 +350,7 @@ def _main_loop( f_stop=f_stop, has_exception=server_app_thread_has_exception, enable_tf_gpu_growth=enable_tf_gpu_growth, + run_id=run.run_id, ) # Buffer time so the `ServerApp` in separate thread is ready @@ -514,9 +448,7 @@ def _run_simulation( # If no `Run` object is set, create one if run is None: run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) - run = Run( - run_id=run_id, fab_id="", fab_version="", fab_hash="", override_config={} - ) + run = Run.create_empty(run_id=run_id) args = ( num_supernodes, @@ -566,20 +498,10 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: parser.add_argument( "--app", type=str, - default=None, + required=True, help="Path to a directory containing a FAB-like structure with a " 
"pyproject.toml.", ) - parser.add_argument( - "--server-app", - help="(DEPRECATED: use --app instead) For example: `server:app` or " - "`project.package.module:wrapper.app`", - ) - parser.add_argument( - "--client-app", - help="(DEPRECATED: use --app instead) For example: `client:app` or " - "`project.package.module:wrapper.app`", - ) parser.add_argument( "--num-supernodes", type=int, @@ -628,13 +550,6 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: help="When unset, only INFO, WARNING and ERROR log messages will be shown. " "If set, DEBUG-level logs will be displayed. ", ) - parser.add_argument( - "--app-dir", - default="", - help="Add specified directory to the PYTHONPATH and load" - "ClientApp and ServerApp from there." - " Default: current working directory.", - ) parser.add_argument( "--flwr-dir", default=None, diff --git a/src/py/flwr/simulation/simulationio_connection.py b/src/py/flwr/simulation/simulationio_connection.py new file mode 100644 index 000000000000..ab6e5450c90e --- /dev/null +++ b/src/py/flwr/simulation/simulationio_connection.py @@ -0,0 +1,86 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower SimulationIo connection.""" + + +from logging import DEBUG, WARNING +from typing import Optional, cast + +import grpc + +from flwr.common.constant import SIMULATIONIO_API_DEFAULT_CLIENT_ADDRESS +from flwr.common.grpc import create_channel +from flwr.common.logger import log +from flwr.proto.simulationio_pb2_grpc import SimulationIoStub # pylint: disable=E0611 + + +class SimulationIoConnection: + """`SimulationIoConnection` provides an interface to the SimulationIo API. + + Parameters + ---------- + simulationio_service_address : str (default: "[::]:9094") + The address (URL, IPv6, IPv4) of the SuperLink SimulationIo API service. + root_certificates : Optional[bytes] (default: None) + The PEM-encoded root certificates as a byte string. + If provided, a secure connection using the certificates will be + established to an SSL-enabled Flower server. + """ + + def __init__( # pylint: disable=too-many-arguments + self, + simulationio_service_address: str = SIMULATIONIO_API_DEFAULT_CLIENT_ADDRESS, + root_certificates: Optional[bytes] = None, + ) -> None: + self._addr = simulationio_service_address + self._cert = root_certificates + self._grpc_stub: Optional[SimulationIoStub] = None + self._channel: Optional[grpc.Channel] = None + + @property + def _is_connected(self) -> bool: + """Check if connected to the SimulationIo API server.""" + return self._channel is not None + + @property + def _stub(self) -> SimulationIoStub: + """SimulationIo stub.""" + if not self._is_connected: + self._connect() + return cast(SimulationIoStub, self._grpc_stub) + + def _connect(self) -> None: + """Connect to the SimulationIo API.""" + if self._is_connected: + log(WARNING, "Already connected") + return + self._channel = create_channel( + server_address=self._addr, + insecure=(self._cert is None), + root_certificates=self._cert, + ) + self._grpc_stub = SimulationIoStub(self._channel) + log(DEBUG, 
"[SimulationIO] Connected to %s", self._addr) + + def _disconnect(self) -> None: + """Disconnect from the SimulationIo API.""" + if not self._is_connected: + log(DEBUG, "Already disconnected") + return + channel: grpc.Channel = self._channel + self._channel = None + self._grpc_stub = None + channel.close() + log(DEBUG, "[SimulationIO] Disconnected") diff --git a/src/py/flwr/superexec/app.py b/src/py/flwr/superexec/app.py index c00aa0f88e7b..b9ef88e3c05d 100644 --- a/src/py/flwr/superexec/app.py +++ b/src/py/flwr/superexec/app.py @@ -16,20 +16,11 @@ import argparse import sys -from logging import INFO, WARN -from pathlib import Path -from typing import Optional +from logging import INFO -import grpc - -from flwr.common import EventType, event, log -from flwr.common.address import parse_address -from flwr.common.config import parse_config_args -from flwr.common.constant import EXEC_API_DEFAULT_ADDRESS -from flwr.common.exit_handlers import register_exit_handlers +from flwr.common import log from flwr.common.object_ref import load_app, validate -from .exec_grpc import run_superexec_api_grpc from .executor import Executor @@ -37,133 +28,14 @@ def run_superexec() -> None: """Run Flower SuperExec.""" log(INFO, "Starting Flower SuperExec") - event(EventType.RUN_SUPEREXEC_ENTER) - - args = _parse_args_run_superexec().parse_args() - - # Parse IP address - parsed_address = parse_address(args.address) - if not parsed_address: - sys.exit(f"SuperExec IP address ({args.address}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - - # Obtain certificates - certificates = _try_obtain_certificates(args) - - # Start SuperExec API - superexec_server: grpc.Server = run_superexec_api_grpc( - address=address, - executor=_load_executor(args), - certificates=certificates, - config=parse_config_args( - [args.executor_config] if args.executor_config else args.executor_config - ), - ) - - grpc_servers = [superexec_server] - 
- # Graceful shutdown - register_exit_handlers( - event_type=EventType.RUN_SUPEREXEC_LEAVE, - grpc_servers=grpc_servers, - bckg_threads=None, - ) - - superexec_server.wait_for_termination() - - -def _parse_args_run_superexec() -> argparse.ArgumentParser: - """Parse command line arguments for SuperExec.""" - parser = argparse.ArgumentParser( - description="Start a Flower SuperExec", - ) - parser.add_argument( - "--address", - help="SuperExec (gRPC) server address (IPv4, IPv6, or a domain name)", - default=EXEC_API_DEFAULT_ADDRESS, - ) - parser.add_argument( - "--executor", - help="For example: `deployment:exec` or `project.package.module:wrapper.exec`.", - default="flwr.superexec.deployment:executor", - ) - parser.add_argument( - "--executor-dir", - help="The directory for the executor.", - default=".", - ) - parser.add_argument( - "--executor-config", - help="Key-value pairs for the executor config, separated by spaces. " - 'For example:\n\n`--executor-config \'superlink="superlink:9091" ' - 'root-certificates="certificates/superlink-ca.crt"\'`', - ) - parser.add_argument( - "--insecure", - action="store_true", - help="Run the SuperExec without HTTPS, regardless of whether certificate " - "paths are provided. By default, the server runs with HTTPS enabled. 
" - "Use this flag only if you understand the risks.", - ) - parser.add_argument( - "--ssl-certfile", - help="SuperExec server SSL certificate file (as a path str) " - "to create a secure connection.", - type=str, - default=None, - ) - parser.add_argument( - "--ssl-keyfile", - help="SuperExec server SSL private key file (as a path str) " - "to create a secure connection.", - type=str, - ) - parser.add_argument( - "--ssl-ca-certfile", - help="SuperExec server SSL CA certificate file (as a path str) " - "to create a secure connection.", - type=str, - ) - return parser - - -def _try_obtain_certificates( - args: argparse.Namespace, -) -> Optional[tuple[bytes, bytes, bytes]]: - # Obtain certificates - if args.insecure: - log(WARN, "Option `--insecure` was set. Starting insecure HTTP server.") - return None - # Check if certificates are provided - if args.ssl_certfile and args.ssl_keyfile and args.ssl_ca_certfile: - if not Path(args.ssl_ca_certfile).is_file(): - sys.exit("Path argument `--ssl-ca-certfile` does not point to a file.") - if not Path(args.ssl_certfile).is_file(): - sys.exit("Path argument `--ssl-certfile` does not point to a file.") - if not Path(args.ssl_keyfile).is_file(): - sys.exit("Path argument `--ssl-keyfile` does not point to a file.") - certificates = ( - Path(args.ssl_ca_certfile).read_bytes(), # CA certificate - Path(args.ssl_certfile).read_bytes(), # server certificate - Path(args.ssl_keyfile).read_bytes(), # server private key - ) - return certificates - if args.ssl_certfile or args.ssl_keyfile or args.ssl_ca_certfile: - sys.exit( - "You need to provide valid file paths to `--ssl-certfile`, " - "`--ssl-keyfile`, and `—-ssl-ca-certfile` to create a secure " - "connection in SuperExec server (gRPC-rere)." - ) sys.exit( - "Certificates are required unless running in insecure mode. 
" - "Please provide certificate paths to `--ssl-certfile`, " - "`--ssl-keyfile`, and `—-ssl-ca-certfile` or run the server " - "in insecure mode using '--insecure' if you understand the risks." + "Manually launching the SuperExec is deprecated. Since `flwr 1.13.0` " + "the executor service runs in the SuperLink. Launching it manually is not " + "recommended." ) -def _load_executor( +def load_executor( args: argparse.Namespace, ) -> Executor: """Get the executor plugin.""" diff --git a/src/py/flwr/superexec/deployment.py b/src/py/flwr/superexec/deployment.py index 331fd817228e..13c1034abc35 100644 --- a/src/py/flwr/superexec/deployment.py +++ b/src/py/flwr/superexec/deployment.py @@ -15,23 +15,26 @@ """Deployment engine executor.""" import hashlib -import subprocess from logging import ERROR, INFO from pathlib import Path from typing import Optional from typing_extensions import override -from flwr.cli.install import install_from_fab -from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS -from flwr.common.grpc import create_channel +from flwr.cli.config_utils import get_fab_metadata +from flwr.common import ConfigsRecord, Context, RecordSet +from flwr.common.constant import ( + SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS, + Status, + SubStatus, +) from flwr.common.logger import log -from flwr.common.serde import fab_to_proto, user_config_to_proto -from flwr.common.typing import Fab, UserConfig -from flwr.proto.driver_pb2_grpc import DriverStub -from flwr.proto.run_pb2 import CreateRunRequest # pylint: disable=E0611 +from flwr.common.typing import Fab, RunStatus, UserConfig +from flwr.server.superlink.ffs import Ffs +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory -from .executor import Executor, RunTracker +from .executor import Executor class DeploymentEngine(Executor): @@ -39,7 +42,7 @@ class DeploymentEngine(Executor): Parameters ---------- - superlink: str (default: 
"0.0.0.0:9091") + serverappio_api_address: str (default: "127.0.0.1:9091") Address of the SuperLink to connect to. root_certificates: Optional[str] (default: None) Specifies the path to the PEM-encoded root certificate file for @@ -50,11 +53,11 @@ class DeploymentEngine(Executor): def __init__( self, - superlink: str = DRIVER_API_DEFAULT_ADDRESS, + serverappio_api_address: str = SERVERAPPIO_API_DEFAULT_CLIENT_ADDRESS, root_certificates: Optional[str] = None, flwr_dir: Optional[str] = None, ) -> None: - self.superlink = superlink + self.serverappio_api_address = serverappio_api_address if root_certificates is None: self.root_certificates = None self.root_certificates_bytes = None @@ -62,7 +65,30 @@ def __init__( self.root_certificates = root_certificates self.root_certificates_bytes = Path(root_certificates).read_bytes() self.flwr_dir = flwr_dir - self.stub: Optional[DriverStub] = None + self.linkstate_factory: Optional[LinkStateFactory] = None + self.ffs_factory: Optional[FfsFactory] = None + + @override + def initialize( + self, linkstate_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: + """Initialize the executor with the necessary factories.""" + self.linkstate_factory = linkstate_factory + self.ffs_factory = ffs_factory + + @property + def linkstate(self) -> LinkState: + """Return the LinkState.""" + if self.linkstate_factory is None: + raise RuntimeError("Executor is not initialized.") + return self.linkstate_factory.state() + + @property + def ffs(self) -> Ffs: + """Return the Flower File Storage (FFS).""" + if self.ffs_factory is None: + raise RuntimeError("Executor is not initialized.") + return self.ffs_factory.ffs() @override def set_config( @@ -77,7 +103,7 @@ def set_config( A dictionary for configuration values. Supported configuration key/value pairs: - "superlink": str - The address of the SuperLink Driver API. + The address of the SuperLink ServerAppIo API. - "root-certificates": str The path to the root certificates. 
- "flwr-dir": str @@ -88,7 +114,7 @@ def set_config( if superlink_address := config.get("superlink"): if not isinstance(superlink_address, str): raise ValueError("The `superlink` value should be of type `str`.") - self.superlink = superlink_address + self.serverappio_api_address = superlink_address if root_certificates := config.get("root-certificates"): if not isinstance(root_certificates, str): raise ValueError( @@ -101,85 +127,60 @@ def set_config( raise ValueError("The `flwr-dir` value should be of type `str`.") self.flwr_dir = str(flwr_dir) - def _connect(self) -> None: - if self.stub is not None: - return - channel = create_channel( - server_address=self.superlink, - insecure=(self.root_certificates_bytes is None), - root_certificates=self.root_certificates_bytes, - ) - self.stub = DriverStub(channel) - def _create_run( self, fab: Fab, override_config: UserConfig, ) -> int: - if self.stub is None: - self._connect() + fab_hash = self.ffs.put(fab.content, {}) + if fab_hash != fab.hash_str: + raise RuntimeError( + f"FAB ({fab.hash_str}) hash from request doesn't match contents" + ) + fab_id, fab_version = get_fab_metadata(fab.content) - assert self.stub is not None + run_id = self.linkstate.create_run( + fab_id, fab_version, fab_hash, override_config, ConfigsRecord() + ) + return run_id - req = CreateRunRequest( - fab=fab_to_proto(fab), - override_config=user_config_to_proto(override_config), + def _create_context(self, run_id: int) -> None: + """Register a Context for a Run.""" + # Create an empty context for the Run + context = Context( + run_id=run_id, node_id=0, node_config={}, state=RecordSet(), run_config={} ) - res = self.stub.CreateRun(request=req) - return int(res.run_id) + + # Register the context at the LinkState + self.linkstate.set_serverapp_context(run_id=run_id, context=context) @override def start_run( self, fab_file: bytes, override_config: UserConfig, - federation_config: UserConfig, - ) -> Optional[RunTracker]: + federation_options: 
ConfigsRecord, + ) -> Optional[int]: """Start run using the Flower Deployment Engine.""" + run_id = None try: - # Install FAB to flwr dir - install_from_fab(fab_file, None, True) # Call SuperLink to create run - run_id: int = self._create_run( + run_id = self._create_run( Fab(hashlib.sha256(fab_file).hexdigest(), fab_file), override_config ) - log(INFO, "Created run %s", str(run_id)) - command = [ - "flower-server-app", - "--run-id", - str(run_id), - "--superlink", - str(self.superlink), - ] - - if self.flwr_dir: - command.append("--flwr-dir") - command.append(self.flwr_dir) - - if self.root_certificates is None: - command.append("--insecure") - else: - command.append("--root-certificates") - command.append(self.root_certificates) - - # Execute the command - proc = subprocess.Popen( # pylint: disable=consider-using-with - command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - ) - log(INFO, "Started run %s", str(run_id)) + # Register context for the Run + self._create_context(run_id=run_id) + log(INFO, "Created run %s", str(run_id)) - return RunTracker( - run_id=run_id, - proc=proc, - ) + return run_id # pylint: disable-next=broad-except except Exception as e: log(ERROR, "Could not start run: %s", str(e)) + if run_id: + run_status = RunStatus(Status.FINISHED, SubStatus.FAILED, str(e)) + self.linkstate.update_run_status(run_id, new_status=run_status) return None diff --git a/src/py/flwr/superexec/exec_grpc.py b/src/py/flwr/superexec/exec_grpc.py index 017395bc8002..f8c9722ba5ac 100644 --- a/src/py/flwr/superexec/exec_grpc.py +++ b/src/py/flwr/superexec/exec_grpc.py @@ -23,33 +23,40 @@ from flwr.common.logger import log from flwr.common.typing import UserConfig from flwr.proto.exec_pb2_grpc import add_ExecServicer_to_server +from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.grpc_bidi.grpc_server import generic_create_grpc_server +from flwr.server.superlink.linkstate import LinkStateFactory from 
.exec_servicer import ExecServicer from .executor import Executor -def run_superexec_api_grpc( +# pylint: disable-next=too-many-arguments, too-many-positional-arguments +def run_exec_api_grpc( address: str, executor: Executor, + state_factory: LinkStateFactory, + ffs_factory: FfsFactory, certificates: Optional[tuple[bytes, bytes, bytes]], config: UserConfig, ) -> grpc.Server: - """Run SuperExec API (gRPC, request-response).""" + """Run Exec API (gRPC, request-response).""" executor.set_config(config) exec_servicer: grpc.Server = ExecServicer( + linkstate_factory=state_factory, + ffs_factory=ffs_factory, executor=executor, ) - superexec_add_servicer_to_server_fn = add_ExecServicer_to_server - superexec_grpc_server = generic_create_grpc_server( - servicer_and_add_fn=(exec_servicer, superexec_add_servicer_to_server_fn), + exec_add_servicer_to_server_fn = add_ExecServicer_to_server + exec_grpc_server = generic_create_grpc_server( + servicer_and_add_fn=(exec_servicer, exec_add_servicer_to_server_fn), server_address=address, max_message_length=GRPC_MAX_MESSAGE_LENGTH, certificates=certificates, ) - log(INFO, "Starting Flower SuperExec gRPC server on %s", address) - superexec_grpc_server.start() + log(INFO, "Flower Deployment Engine: Starting Exec API on %s", address) + exec_grpc_server.start() - return superexec_grpc_server + return exec_grpc_server diff --git a/src/py/flwr/superexec/exec_servicer.py b/src/py/flwr/superexec/exec_servicer.py index ebb12b5ddbd2..3a484ea8c47c 100644 --- a/src/py/flwr/superexec/exec_servicer.py +++ b/src/py/flwr/superexec/exec_servicer.py @@ -15,9 +15,6 @@ """SuperExec API servicer.""" -import select -import sys -import threading import time from collections.abc import Generator from logging import ERROR, INFO @@ -25,27 +22,42 @@ import grpc +from flwr.common import now +from flwr.common.constant import LOG_STREAM_INTERVAL, Status from flwr.common.logger import log -from flwr.common.serde import user_config_from_proto +from flwr.common.serde 
import ( + configs_record_from_proto, + run_to_proto, + user_config_from_proto, +) from flwr.proto import exec_pb2_grpc # pylint: disable=E0611 from flwr.proto.exec_pb2 import ( # pylint: disable=E0611 + ListRunsRequest, + ListRunsResponse, StartRunRequest, StartRunResponse, StreamLogsRequest, StreamLogsResponse, ) +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory -from .executor import Executor, RunTracker - -SELECT_TIMEOUT = 1 # Timeout for selecting ready-to-read file descriptors (in seconds) +from .executor import Executor class ExecServicer(exec_pb2_grpc.ExecServicer): """SuperExec API servicer.""" - def __init__(self, executor: Executor) -> None: + def __init__( + self, + linkstate_factory: LinkStateFactory, + ffs_factory: FfsFactory, + executor: Executor, + ) -> None: + self.linkstate_factory = linkstate_factory + self.ffs_factory = ffs_factory self.executor = executor - self.runs: dict[int, RunTracker] = {} + self.executor.initialize(linkstate_factory, ffs_factory) def StartRun( self, request: StartRunRequest, context: grpc.ServicerContext @@ -53,84 +65,72 @@ def StartRun( """Create run ID.""" log(INFO, "ExecServicer.StartRun") - run = self.executor.start_run( + run_id = self.executor.start_run( request.fab.content, user_config_from_proto(request.override_config), - user_config_from_proto(request.federation_config), + configs_record_from_proto(request.federation_options), ) - if run is None: + if run_id is None: log(ERROR, "Executor failed to start run") return StartRunResponse() - self.runs[run.run_id] = run - - # Start a background thread to capture the log output - capture_thread = threading.Thread( - target=_capture_logs, args=(run,), daemon=True - ) - capture_thread.start() - - return StartRunResponse(run_id=run.run_id) + return StartRunResponse(run_id=run_id) def StreamLogs( # pylint: disable=C0103 self, request: StreamLogsRequest, context: grpc.ServicerContext ) -> 
Generator[StreamLogsResponse, Any, None]: """Get logs.""" log(INFO, "ExecServicer.StreamLogs") + state = self.linkstate_factory.state() + + # Retrieve run ID + run_id = request.run_id # Exit if `run_id` not found - if request.run_id not in self.runs: + if not state.get_run(run_id): context.abort(grpc.StatusCode.NOT_FOUND, "Run ID not found") - last_sent_index = 0 + after_timestamp = request.after_timestamp + 1e-6 while context.is_active(): - # Yield n'th row of logs, if n'th row < len(logs) - logs = self.runs[request.run_id].logs - for i in range(last_sent_index, len(logs)): - yield StreamLogsResponse(log_output=logs[i]) - last_sent_index = len(logs) + log_msg, latest_timestamp = state.get_serverapp_log(run_id, after_timestamp) + if log_msg: + yield StreamLogsResponse( + log_output=log_msg, + latest_timestamp=latest_timestamp, + ) + # Add a small epsilon to the latest timestamp to avoid getting + # the same log + after_timestamp = max(latest_timestamp + 1e-6, after_timestamp) # Wait for and continue to yield more log responses only if the # run isn't completed yet. If the run is finished, the entire log # is returned at this point and the server ends the stream. - if self.runs[request.run_id].proc.poll() is not None: + run_status = state.get_run_status({run_id})[run_id] + if run_status.status == Status.FINISHED: log(INFO, "All logs for run ID `%s` returned", request.run_id) - context.set_code(grpc.StatusCode.OK) context.cancel() - time.sleep(1.0) # Sleep briefly to avoid busy waiting - - -def _capture_logs( - run: RunTracker, -) -> None: - while True: - # Explicitly check if Popen.poll() is None. Required for `pytest`. 
- if run.proc.poll() is None: - # Select streams only when ready to read - ready_to_read, _, _ = select.select( - [run.proc.stdout, run.proc.stderr], - [], - [], - SELECT_TIMEOUT, - ) - # Read from std* and append to RunTracker.logs - for stream in ready_to_read: - # Flush stdout to view output in real time - readline = stream.readline() - sys.stdout.write(readline) - sys.stdout.flush() - # Append to logs - line = readline.rstrip() - if line: - run.logs.append(f"{line}") - - # Close std* to prevent blocking - elif run.proc.poll() is not None: - log(INFO, "Subprocess finished, exiting log capture") - if run.proc.stdout: - run.proc.stdout.close() - if run.proc.stderr: - run.proc.stderr.close() - break + time.sleep(LOG_STREAM_INTERVAL) # Sleep briefly to avoid busy waiting + + def ListRuns( + self, request: ListRunsRequest, context: grpc.ServicerContext + ) -> ListRunsResponse: + """Handle `flwr ls` command.""" + log(INFO, "ExecServicer.List") + state = self.linkstate_factory.state() + + # Handle `flwr ls --runs` + if not request.HasField("run_id"): + return _create_list_runs_response(state.get_run_ids(), state) + # Handle `flwr ls --run-id ` + return _create_list_runs_response({request.run_id}, state) + + +def _create_list_runs_response(run_ids: set[int], state: LinkState) -> ListRunsResponse: + """Create response for `flwr ls --runs` and `flwr ls --run-id `.""" + run_dict = {run_id: state.get_run(run_id) for run_id in run_ids} + return ListRunsResponse( + run_dict={run_id: run_to_proto(run) for run_id, run in run_dict.items() if run}, + now=now().isoformat(), + ) diff --git a/src/py/flwr/superexec/exec_servicer_test.py b/src/py/flwr/superexec/exec_servicer_test.py index b777bc806fe5..6045d6eb1a63 100644 --- a/src/py/flwr/superexec/exec_servicer_test.py +++ b/src/py/flwr/superexec/exec_servicer_test.py @@ -16,11 +16,19 @@ import subprocess +import unittest +from datetime import datetime from unittest.mock import MagicMock, Mock -from flwr.proto.exec_pb2 import 
StartRunRequest # pylint: disable=E0611 +from flwr.common import ConfigsRecord, now +from flwr.proto.exec_pb2 import ( # pylint: disable=E0611 + ListRunsRequest, + StartRunRequest, +) +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkStateFactory -from .exec_servicer import ExecServicer, _capture_logs +from .exec_servicer import ExecServicer def test_start_run() -> None: @@ -36,7 +44,7 @@ def test_start_run() -> None: run_res.proc = proc executor = MagicMock() - executor.start_run = lambda _, __, ___: run_res + executor.start_run = lambda _, __, ___: run_res.run_id context_mock = MagicMock() @@ -44,26 +52,55 @@ def test_start_run() -> None: request.fab.content = b"test" # Create a instance of FlowerServiceServicer - servicer = ExecServicer(executor=executor) + servicer = ExecServicer(Mock(), Mock(), executor=executor) # Execute response = servicer.StartRun(request, context_mock) - assert response.run_id == 10 -def test_capture_logs() -> None: - """Test capture_logs function.""" - run_res = Mock() - run_res.logs = [] - with subprocess.Popen( - ["echo", "success"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - ) as proc: - run_res.proc = proc - _capture_logs(run_res) +class TestExecServicer(unittest.TestCase): + """Test the Exec API servicer.""" + + def setUp(self) -> None: + """Set up test fixtures.""" + self.servicer = ExecServicer( + linkstate_factory=LinkStateFactory(":flwr-in-memory-state:"), + ffs_factory=FfsFactory("./tmp"), + executor=Mock(), + ) + self.state = self.servicer.linkstate_factory.state() + + def test_list_runs(self) -> None: + """Test List method of ExecServicer with --runs option.""" + # Prepare + run_ids = set() + for _ in range(3): + run_id = self.state.create_run( + "mock fabid", "mock fabver", "fake hash", {}, ConfigsRecord() + ) + run_ids.add(run_id) + + # Execute + response = self.servicer.ListRuns(ListRunsRequest(), Mock()) + retrieved_timestamp = 
datetime.fromisoformat(response.now).timestamp() + + # Assert + self.assertLess(abs(retrieved_timestamp - now().timestamp()), 1e-3) + self.assertEqual(set(response.run_dict.keys()), run_ids) + + def test_list_run_id(self) -> None: + """Test List method of ExecServicer with --run-id option.""" + # Prepare + for _ in range(3): + run_id = self.state.create_run( + "mock fabid", "mock fabver", "fake hash", {}, ConfigsRecord() + ) + + # Execute + response = self.servicer.ListRuns(ListRunsRequest(run_id=run_id), Mock()) + retrieved_timestamp = datetime.fromisoformat(response.now).timestamp() - assert len(run_res.logs) == 1 - assert run_res.logs[0] == "success" + # Assert + self.assertLess(abs(retrieved_timestamp - now().timestamp()), 1e-3) + self.assertEqual(set(response.run_dict.keys()), {run_id}) diff --git a/src/py/flwr/superexec/executor.py b/src/py/flwr/superexec/executor.py index 08b66a438e4d..a4c73a7b19fe 100644 --- a/src/py/flwr/superexec/executor.py +++ b/src/py/flwr/superexec/executor.py @@ -19,7 +19,10 @@ from subprocess import Popen from typing import Optional +from flwr.common import ConfigsRecord from flwr.common.typing import UserConfig +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkStateFactory @dataclass @@ -34,6 +37,23 @@ class RunTracker: class Executor(ABC): """Execute and monitor a Flower run.""" + @abstractmethod + def initialize( + self, linkstate_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: + """Initialize the executor with the necessary factories. + + This method sets up the executor by providing it with the factories required + to access the LinkState and the Flower File Storage (FFS) in the SuperLink. + + Parameters + ---------- + linkstate_factory : LinkStateFactory + The factory to create access to the LinkState. + ffs_factory : FfsFactory + The factory to create access to the Flower File Storage (FFS). 
+ """ + @abstractmethod def set_config( self, @@ -52,8 +72,8 @@ def start_run( self, fab_file: bytes, override_config: UserConfig, - federation_config: UserConfig, - ) -> Optional[RunTracker]: + federation_options: ConfigsRecord, + ) -> Optional[int]: """Start a run using the given Flower FAB ID and version. This method creates a new run on the SuperLink, returns its run_id @@ -65,12 +85,11 @@ def start_run( The Flower App Bundle file bytes. override_config: UserConfig The config overrides dict sent by the user (using `flwr run`). - federation_config: UserConfig - The federation options dict sent by the user (using `flwr run`). + federation_options: ConfigsRecord + The federation options sent by the user (using `flwr run`). Returns ------- - run_id : Optional[RunTracker] - The run_id and the associated process of the run created by the SuperLink, - or `None` if it fails. + run_id : Optional[int] + The run_id of the run created by the SuperLink, or `None` if it fails. """ diff --git a/src/py/flwr/superexec/simulation.py b/src/py/flwr/superexec/simulation.py index e913b6812556..09b7c5a9731d 100644 --- a/src/py/flwr/superexec/simulation.py +++ b/src/py/flwr/superexec/simulation.py @@ -15,100 +15,60 @@ """Simulation engine executor.""" -import json -import subprocess -import sys -from logging import ERROR, INFO, WARN +import hashlib +from logging import ERROR, INFO from typing import Optional from typing_extensions import override -from flwr.cli.config_utils import load_and_validate -from flwr.cli.install import install_from_fab -from flwr.common.config import unflatten_dict -from flwr.common.constant import RUN_ID_NUM_BYTES +from flwr.cli.config_utils import get_fab_metadata +from flwr.common import ConfigsRecord, Context, RecordSet from flwr.common.logger import log -from flwr.common.typing import UserConfig -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes - -from .executor import Executor, RunTracker - - -def 
_user_config_to_str(user_config: UserConfig) -> str: - """Convert override user config to string.""" - user_config_list_str = [] - for key, value in user_config.items(): - if isinstance(value, bool): - user_config_list_str.append(f"{key}={str(value).lower()}") - elif isinstance(value, (int, float)): - user_config_list_str.append(f"{key}={value}") - elif isinstance(value, str): - user_config_list_str.append(f'{key}="{value}"') - else: - raise ValueError( - "Only types `bool`, `float`, `int` and `str` are supported" - ) +from flwr.common.typing import Fab, UserConfig +from flwr.server.superlink.ffs import Ffs +from flwr.server.superlink.ffs.ffs_factory import FfsFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory - user_config_str = ",".join(user_config_list_str) - return user_config_str +from .executor import Executor class SimulationEngine(Executor): - """Simulation engine executor. - - Parameters - ---------- - num_supernodes: Opitonal[str] (default: None) - Total number of nodes to involve in the simulation. 
- """ + """Simulation engine executor.""" def __init__( self, - num_supernodes: Optional[int] = None, - verbose: Optional[bool] = False, ) -> None: - self.num_supernodes = num_supernodes - self.verbose = verbose + self.linkstate_factory: Optional[LinkStateFactory] = None + self.ffs_factory: Optional[FfsFactory] = None + + @override + def initialize( + self, linkstate_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: + """Initialize the executor with the necessary factories.""" + self.linkstate_factory = linkstate_factory + self.ffs_factory = ffs_factory + + @property + def linkstate(self) -> LinkState: + """Return the LinkState.""" + if self.linkstate_factory is None: + raise RuntimeError("Executor is not initialized.") + return self.linkstate_factory.state() + + @property + def ffs(self) -> Ffs: + """Return the Flower File Storage (FFS).""" + if self.ffs_factory is None: + raise RuntimeError("Executor is not initialized.") + return self.ffs_factory.ffs() @override def set_config( self, config: UserConfig, ) -> None: - """Set executor config arguments. - - Parameters - ---------- - config : UserConfig - A dictionary for configuration values. - Supported configuration key/value pairs: - - "num-supernodes": int - Number of nodes to register for the simulation. - - "verbose": bool - Set verbosity of logs. - """ - if num_supernodes := config.get("num-supernodes"): - if not isinstance(num_supernodes, int): - raise ValueError("The `num-supernodes` value should be of type `int`.") - self.num_supernodes = num_supernodes - elif self.num_supernodes is None: - log( - ERROR, - "To start a run with the simulation plugin, please specify " - "the number of SuperNodes. This can be done by using the " - "`--executor-config` argument when launching the SuperExec.", - ) - raise ValueError( - "`num-supernodes` must not be `None`, it must be a valid " - "positive integer." 
- ) - - if verbose := config.get("verbose"): - if not isinstance(verbose, bool): - raise ValueError( - "The `verbose` value must be a string `true` or `false`." - ) - self.verbose = verbose + """Set executor config arguments.""" # pylint: disable=too-many-locals @override @@ -116,92 +76,44 @@ def start_run( self, fab_file: bytes, override_config: UserConfig, - federation_config: UserConfig, - ) -> Optional[RunTracker]: + federation_options: ConfigsRecord, + ) -> Optional[int]: """Start run using the Flower Simulation Engine.""" - if self.num_supernodes is None: - raise ValueError( - "Error in `SuperExec` (`SimulationEngine` executor):\n\n" - "`num-supernodes` must not be `None`, it must be a valid " - "positive integer. In order to start this simulation executor " - "with a specified number of `SuperNodes`, you can either provide " - "a `--executor` that has been initialized with a number of nodes " - "to the `flower-superexec` CLI, or `--executor-config num-supernodes=N`" - "to the `flower-superexec` CLI." - ) try: - - # Install FAB to flwr dir - fab_path = install_from_fab(fab_file, None, True) - - # Install FAB Python package - subprocess.run( - [sys.executable, "-m", "pip", "install", "--no-deps", str(fab_path)], - stdout=None if self.verbose else subprocess.DEVNULL, - stderr=None if self.verbose else subprocess.DEVNULL, - check=True, - ) - - # Load and validate config - config, errors, warnings = load_and_validate(fab_path / "pyproject.toml") - if errors: - raise ValueError(errors) - - if warnings: - log(WARN, warnings) - - if config is None: + # Check that num-supernodes is set + if "num-supernodes" not in federation_options: raise ValueError( - "Config extracted from FAB's pyproject.toml is not valid" + "Federation options doesn't contain key `num-supernodes`." 
) - # Flatten federated config - federation_config_flat = unflatten_dict(federation_config) + # Create run + fab = Fab(hashlib.sha256(fab_file).hexdigest(), fab_file) + fab_hash = self.ffs.put(fab.content, {}) + if fab_hash != fab.hash_str: + raise RuntimeError( + f"FAB ({fab.hash_str}) hash from request doesn't match contents" + ) + fab_id, fab_version = get_fab_metadata(fab.content) - num_supernodes = federation_config_flat.get( - "num-supernodes", self.num_supernodes + run_id = self.linkstate.create_run( + fab_id, fab_version, fab_hash, override_config, federation_options ) - backend_cfg = federation_config_flat.get("backend", {}) - verbose: Optional[bool] = federation_config_flat.get("verbose") - - # In Simulation there is no SuperLink, still we create a run_id - run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) - log(INFO, "Created run %s", str(run_id)) - # Prepare commnand - command = [ - "flower-simulation", - "--app", - f"{str(fab_path)}", - "--num-supernodes", - f"{num_supernodes}", - "--run-id", - str(run_id), - ] - - if backend_cfg: - # Stringify as JSON - command.extend(["--backend-config", json.dumps(backend_cfg)]) - - if verbose: - command.extend(["--verbose"]) - - if override_config: - override_config_str = _user_config_to_str(override_config) - command.extend(["--run-config", f"{override_config_str}"]) - - # Start Simulation - proc = subprocess.Popen( # pylint: disable=consider-using-with - command, - text=True, + # Create an empty context for the Run + context = Context( + run_id=run_id, + node_id=0, + node_config={}, + state=RecordSet(), + run_config={}, ) - log(INFO, "Started run %s", str(run_id)) + # Register the context at the LinkState + self.linkstate.set_serverapp_context(run_id=run_id, context=context) - return RunTracker( - run_id=run_id, - proc=proc, - ) + log(INFO, "Created run %s", str(run_id)) + + return run_id # pylint: disable-next=broad-except except Exception as e: diff --git a/src/py/flwr_tool/protoc_test.py 
b/src/py/flwr_tool/protoc_test.py index f0784a4498d2..e2e14858174c 100644 --- a/src/py/flwr_tool/protoc_test.py +++ b/src/py/flwr_tool/protoc_test.py @@ -28,4 +28,4 @@ def test_directories() -> None: def test_proto_file_count() -> None: """Test if the correct number of proto files were captured by the glob.""" - assert len(PROTO_FILES) == 14 + assert len(PROTO_FILES) == 16 diff --git a/src/py/flwr_tool/update_changelog.py b/src/py/flwr_tool/update_changelog.py deleted file mode 100644 index e3cffff7e36c..000000000000 --- a/src/py/flwr_tool/update_changelog.py +++ /dev/null @@ -1,243 +0,0 @@ -# mypy: ignore-errors -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""This module is used to update the changelog.""" - - -import re -from sys import argv - -from github import Github - -REPO_NAME = "adap/flower" -CHANGELOG_FILE = "doc/source/ref-changelog.md" -CHANGELOG_SECTION_HEADER = "### Changelog entry" - - -def _get_latest_tag(gh_api): - """Retrieve the latest tag from the GitHub repository.""" - repo = gh_api.get_repo(REPO_NAME) - tags = repo.get_tags() - return tags[0] if tags.totalCount > 0 else None - - -def _get_pull_requests_since_tag(gh_api, tag): - """Get a list of pull requests merged into the main branch since a given tag.""" - repo = gh_api.get_repo(REPO_NAME) - commits = {commit.sha for commit in repo.compare(tag.commit.sha, "main").commits} - prs = set() - for pr_info in repo.get_pulls( - state="closed", sort="created", direction="desc", base="main" - ): - if pr_info.merge_commit_sha in commits: - prs.add(pr_info) - if len(prs) == len(commits): - break - return prs - - -def _format_pr_reference(title, number, url): - """Format a pull request reference as a markdown list item.""" - return f"- **{title.replace('*', '')}** ([#{number}]({url}))" - - -def _extract_changelog_entry(pr_info): - """Extract the changelog entry from a pull request's body.""" - if not pr_info.body: - return None, "general" - - entry_match = re.search( - f"{CHANGELOG_SECTION_HEADER}(.+?)(?=##|$)", pr_info.body, re.DOTALL - ) - if not entry_match: - return None, None - - entry_text = entry_match.group(1).strip() - - # Remove markdown comments - entry_text = re.sub(r"", "", entry_text, flags=re.DOTALL).strip() - - token_markers = { - "general": "", - "skip": "", - "baselines": "", - "examples": "", - "sdk": "", - "simulations": "", - } - - # Find the token based on the presence of its marker in entry_text - token = next( - (token for token, marker in token_markers.items() if marker in entry_text), None - ) - - return entry_text, token - - -def 
_update_changelog(prs): - """Update the changelog file with entries from provided pull requests.""" - with open(CHANGELOG_FILE, "r+", encoding="utf-8") as file: - content = file.read() - unreleased_index = content.find("## Unreleased") - - if unreleased_index == -1: - print("Unreleased header not found in the changelog.") - return - - # Find the end of the Unreleased section - next_header_index = content.find("##", unreleased_index + 1) - next_header_index = ( - next_header_index if next_header_index != -1 else len(content) - ) - - for pr_info in prs: - pr_entry_text, category = _extract_changelog_entry(pr_info) - - # Skip if PR should be skipped or already in changelog - if category == "skip" or f"#{pr_info.number}]" in content: - continue - - pr_reference = _format_pr_reference( - pr_info.title, pr_info.number, pr_info.html_url - ) - - # Process based on category - if category in ["general", "baselines", "examples", "sdk", "simulations"]: - entry_title = _get_category_title(category) - content = _update_entry( - content, - entry_title, - pr_info, - unreleased_index, - next_header_index, - ) - - elif pr_entry_text: - content = _insert_new_entry( - content, pr_info, pr_reference, pr_entry_text, unreleased_index - ) - - else: - content = _insert_entry_no_desc(content, pr_reference, unreleased_index) - - next_header_index = content.find("##", unreleased_index + 1) - next_header_index = ( - next_header_index if next_header_index != -1 else len(content) - ) - - # Finalize content update - file.seek(0) - file.write(content) - file.truncate() - - print("Changelog updated.") - - -def _get_category_title(category): - """Get the title of a changelog section based on its category.""" - headers = { - "general": "General improvements", - "baselines": "General updates to Flower Baselines", - "examples": "General updates to Flower Examples", - "sdk": "General updates to Flower SDKs", - "simulations": "General updates to Flower Simulations", - } - return headers.get(category, "") 
- - -def _update_entry( - content, category_title, pr_info, unreleased_index, next_header_index -): - """Update a specific section in the changelog content.""" - if ( - section_index := content.find( - category_title, unreleased_index, next_header_index - ) - ) != -1: - newline_index = content.find("\n", section_index) - closing_parenthesis_index = content.rfind(")", unreleased_index, newline_index) - updated_entry = f", [{pr_info.number}]({pr_info.html_url})" - content = ( - content[:closing_parenthesis_index] - + updated_entry - + content[closing_parenthesis_index:] - ) - else: - new_section = ( - f"\n- **{category_title}** ([#{pr_info.number}]({pr_info.html_url}))\n" - ) - insert_index = content.find("\n", unreleased_index) + 1 - content = content[:insert_index] + new_section + content[insert_index:] - return content - - -def _insert_new_entry(content, pr_info, pr_reference, pr_entry_text, unreleased_index): - """Insert a new entry into the changelog.""" - if (existing_entry_start := content.find(pr_entry_text)) != -1: - pr_ref_end = content.rfind("\n", 0, existing_entry_start) - updated_entry = ( - f"{content[pr_ref_end]}\n, [{pr_info.number}]({pr_info.html_url})" - ) - content = content[:pr_ref_end] + updated_entry + content[existing_entry_start:] - else: - insert_index = content.find("\n", unreleased_index) + 1 - - # Split the pr_entry_text into paragraphs - paragraphs = pr_entry_text.split("\n") - - # Indent each paragraph - indented_paragraphs = [ - " " + paragraph if paragraph else paragraph for paragraph in paragraphs - ] - - # Join the paragraphs back together, ensuring each is separated by a newline - indented_pr_entry_text = "\n".join(indented_paragraphs) - - content = ( - content[:insert_index] - + "\n" - + pr_reference - + "\n\n" - + indented_pr_entry_text - + "\n" - + content[insert_index:] - ) - return content - - -def _insert_entry_no_desc(content, pr_reference, unreleased_index): - """Insert a changelog entry for a pull request with no specific 
description.""" - insert_index = content.find("\n", unreleased_index) + 1 - content = ( - content[:insert_index] + "\n" + pr_reference + "\n" + content[insert_index:] - ) - return content - - -def main(): - """Update changelog using the descriptions of PRs since the latest tag.""" - # Initialize GitHub Client with provided token (as argument) - gh_api = Github(argv[1]) - latest_tag = _get_latest_tag(gh_api) - if not latest_tag: - print("No tags found in the repository.") - return - - prs = _get_pull_requests_since_tag(gh_api, latest_tag) - _update_changelog(prs) - - -if __name__ == "__main__": - main() diff --git a/src/swift/flwr/Sources/Flower/flwr.docc/flwr.md b/src/swift/flwr/Sources/Flower/flwr.docc/flwr.md index 3f293f89e231..75b6b96880ce 100644 --- a/src/swift/flwr/Sources/Flower/flwr.docc/flwr.md +++ b/src/swift/flwr/Sources/Flower/flwr.docc/flwr.md @@ -1,6 +1,6 @@ # ``flwr`` -Seamlessly integrate Flower federated learning framework into your existing machine learning project. +Seamlessly integrate Flower federated AI framework into your existing machine learning project. ## Overview