diff --git a/.asf.yaml b/.asf.yaml
index bec521f1a8c71..5c3b8cb98d964 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -35,12 +35,9 @@ github:
# Enable projects for project management boards
projects: true
enabled_merge_buttons:
- # enable squash button:
squash: true
- # disable merge button:
merge: false
- # disable rebase button:
- rebase: false
+ rebase: true
protected_branches:
master:
required_status_checks:
@@ -82,6 +79,7 @@ github:
branch-2.9: {}
branch-2.10: {}
branch-2.11: {}
+ branch-3.0: {}
notifications:
commits: commits@pulsar.apache.org
diff --git a/.github/ISSUE_TEMPLATE/flaky-test.yml b/.github/ISSUE_TEMPLATE/flaky-test.yml
index d3e5ec061713e..44ff64197822c 100644
--- a/.github/ISSUE_TEMPLATE/flaky-test.yml
+++ b/.github/ISSUE_TEMPLATE/flaky-test.yml
@@ -28,7 +28,7 @@ body:
attributes:
label: Search before asking
description: >
- Please search [issues](https://github.com/apache/pulsar/issues) to check if your issue has already been reported.
+ Please search [issues](https://github.com/apache/pulsar/issues) to check if your issue has already been reported. First search with the test method name and then with the test class name to see if there are reports for the same test method or the same test class.
options:
- label: >
I searched in the [issues](https://github.com/apache/pulsar/issues) and found nothing similar.
@@ -45,16 +45,12 @@ body:
attributes:
label: Exception stacktrace
description: |
- A few lines of the stack trace that shows at least the exception message and the line of test code where the stacktrace occurred.
+ Copy-paste the stack trace from the build log. If the stacktrace is >100 lines, you can limit the stack trace to the point where it includes the stack frame for the test method so that it's possible to find out where the exception occurred in the test.
value: |
-
+
+ ```
-
- Full exception stacktrace
-
- full exception stacktrace here
-
-
+ ```
validations:
required: true
- type: checkboxes
diff --git a/.github/ISSUE_TEMPLATE/pip.md b/.github/ISSUE_TEMPLATE/pip.md
new file mode 100644
index 0000000000000..e0bc586669493
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/pip.md
@@ -0,0 +1,9 @@
+---
+name: PIP
+about: '[DEPRECATED. see pip folder] Submit a Pulsar Improvement Proposal (PIP)'
+title: 'DEPRECATED - Read https://github.com/apache/pulsar/blob/master/pip/README.md'
+labels: PIP
+---
+
+We have stopped using GitHub issues to hold the PIP content.
+Please read [here](https://github.com/apache/pulsar/blob/master/pip/README.md) how to submit a PIP
diff --git a/.github/ISSUE_TEMPLATE/pip.yml b/.github/ISSUE_TEMPLATE/pip.yml
deleted file mode 100644
index cd9aac33194e2..0000000000000
--- a/.github/ISSUE_TEMPLATE/pip.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-name: PIP
-title: "PIP-XYZ: "
-description: Submit a Pulsar Improvement Proposal (PIP)
-labels: [ "PIP" ]
-body:
- - type: markdown
- attributes:
- value: |
- Thank you very much for submitting a Pulsar Improvement Proposal (PIP)! Here are instructions for creating a PIP using this issue template.
-
- Please send a note to the dev@pulsar.apache.org mailing list to start the discussion, using subject prefix `[DISCUSS] PIP-XYZ`. To determine the appropriate PIP number XYZ, inspect the [mailing list](https://lists.apache.org/list.html?dev@pulsar.apache.org) for the most recent PIP. Add 1 to that PIP's number to get your PIP's number.
-
- Based on the discussion and feedback, some changes might be applied by the author(s) to the text of the proposal.
-
- Once some consensus is reached, there will be a vote to formally approve the proposal. The vote will be held on the dev@pulsar.apache.org mailing list. Everyone is welcome to vote on the proposal, though it will considered to be binding only the vote of PMC members. It will be required to have a lazy majority of at least 3 binding +1s votes. The vote should stay open for at least 48 hours.
-
- When the vote is closed, if the outcome is positive, the state of the proposal is updated and the Pull Requests associated with this proposal can start to get merged into the master branch.
- - type: textarea
- attributes:
- label: Motivation
- description: |
- Explain why this change is needed, what benefits it would bring to Apache Pulsar and what problem it's trying to solve.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Goal
- description: |
- Define the scope of this proposal. Given the motivation stated above, what are the problems that this proposal is addressing and what other items will be considering out of scope, perhaps to be left to a different PIP.
- validations:
- required: true
- - type: textarea
- attributes:
- label: API Changes
- description: |
- Illustrate all the proposed changes to the API or wire protocol, with examples of all the newly added classes/methods, including Javadoc.
- - type: textarea
- attributes:
- label: Implementation
- description: |
- This should be a detailed description of all the changes that are expected to be made. It should be detailed enough that any developer that is familiar with Pulsar internals would be able to understand all the parts of the code changes for this proposal.
-
- This should also serve as documentation for any person that is trying to understand or debug the behavior of a certain feature.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Alternatives
- description: |
- If there are alternatives that were already considered by the authors or, after the discussion, by the community, and were rejected, please list them here along with the reason why they were rejected.
- - type: textarea
- attributes:
- label: Anything else?
- - type: markdown
- attributes:
- value: "Thanks for completing our form!"
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 04a65da3f36f8..fdb8459024b1f 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,7 @@
+
### Motivation
@@ -75,7 +75,7 @@ This change added tests and can be verified as follows:
-- [ ] `doc`
+- [ ] `doc`
- [ ] `doc-required`
- [ ] `doc-not-needed`
- [ ] `doc-complete`
diff --git a/.github/changes-filter.yaml b/.github/changes-filter.yaml
index 3ec2fc22946da..be6faa957887d 100644
--- a/.github/changes-filter.yaml
+++ b/.github/changes-filter.yaml
@@ -3,13 +3,13 @@
all:
- '**'
docs:
- - 'site2/**'
- - 'deployment/**'
- - '.asf.yaml'
- '*.md'
- '**/*.md'
+ - '.asf.yaml'
- '.github/changes-filter.yaml'
- '.github/ISSUE_TEMPLATE/**'
+ - '.idea/**'
+ - 'deployment/**'
- 'wiki/**'
tests:
- added|modified: '**/src/test/java/**/*.java'
diff --git a/.github/workflows/ci-go-functions.yaml b/.github/workflows/ci-go-functions.yaml
index f4d1ae0b99887..332ecf0f35229 100644
--- a/.github/workflows/ci-go-functions.yaml
+++ b/.github/workflows/ci-go-functions.yaml
@@ -32,7 +32,7 @@ concurrency:
cancel-in-progress: true
env:
- MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
jobs:
preconditions:
diff --git a/.github/workflows/ci-maven-cache-update.yaml b/.github/workflows/ci-maven-cache-update.yaml
index 87570586fde6e..f20feeea5c9db 100644
--- a/.github/workflows/ci-maven-cache-update.yaml
+++ b/.github/workflows/ci-maven-cache-update.yaml
@@ -42,13 +42,14 @@ on:
- cron: '30 */12 * * *'
env:
- MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
jobs:
update-maven-dependencies-cache:
name: Update Maven dependency cache for ${{ matrix.name }}
env:
JOB_NAME: Update Maven dependency cache for ${{ matrix.name }}
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ${{ matrix.runs-on }}
timeout-minutes: 45
@@ -77,11 +78,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Detect changed files
if: ${{ github.event_name != 'schedule' }}
id: changes
diff --git a/.github/workflows/ci-owasp-dependency-check.yaml b/.github/workflows/ci-owasp-dependency-check.yaml
index 194d88c582d42..090221e699d01 100644
--- a/.github/workflows/ci-owasp-dependency-check.yaml
+++ b/.github/workflows/ci-owasp-dependency-check.yaml
@@ -24,7 +24,7 @@ on:
workflow_dispatch:
env:
- MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
jobs:
run-owasp-dependency-check:
@@ -32,6 +32,7 @@ jobs:
name: Check ${{ matrix.branch }}
env:
JOB_NAME: Check ${{ matrix.branch }}
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: 45
strategy:
@@ -39,6 +40,7 @@ jobs:
matrix:
include:
- branch: master
+ - branch: branch-3.0
- branch: branch-2.11
- branch: branch-2.10
jdk: 11
@@ -56,12 +58,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- if: ${{ matrix.branch == 'master' }}
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Cache local Maven repository
uses: actions/cache@v3
timeout-minutes: 5
diff --git a/.github/workflows/pulsar-ci-flaky.yaml b/.github/workflows/pulsar-ci-flaky.yaml
index acfa66ff43c74..555ebdb17292f 100644
--- a/.github/workflows/pulsar-ci-flaky.yaml
+++ b/.github/workflows/pulsar-ci-flaky.yaml
@@ -36,7 +36,7 @@ concurrency:
cancel-in-progress: true
env:
- MAVEN_OPTS: -Xss1500k -Xmx1024m -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
# defines the retention period for the intermediate build artifacts needed for rerunning a failed build job
# it's possible to rerun individual failed jobs when the build artifacts are available
# if the artifacts have already been expired, the complete workflow can be rerun by closing and reopening the PR or by rebasing the PR
@@ -94,6 +94,7 @@ jobs:
env:
JOB_NAME: Flaky tests suite
COLLECT_COVERAGE: "${{ needs.preconditions.outputs.collect_coverage }}"
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: 100
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
@@ -104,11 +105,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
diff --git a/.github/workflows/pulsar-ci.yaml b/.github/workflows/pulsar-ci.yaml
index 721a1d2eafc72..57b9b082da266 100644
--- a/.github/workflows/pulsar-ci.yaml
+++ b/.github/workflows/pulsar-ci.yaml
@@ -36,7 +36,7 @@ concurrency:
cancel-in-progress: true
env:
- MAVEN_OPTS: -Xss1500k -Xmx1024m -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
# defines the retention period for the intermediate build artifacts needed for rerunning a failed build job
# it's possible to rerun individual failed jobs when the build artifacts are available
# if the artifacts have already been expired, the complete workflow can be rerun by closing and reopening the PR or by rebasing the PR
@@ -95,6 +95,7 @@ jobs:
name: Build and License check
env:
JOB_NAME: Build and License check
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: 60
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
@@ -105,11 +106,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -175,6 +171,7 @@ jobs:
env:
JOB_NAME: CI - Unit - ${{ matrix.name }}
COLLECT_COVERAGE: "${{ needs.preconditions.outputs.collect_coverage }}"
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: ${{ matrix.timeout || 60 }}
needs: ['preconditions', 'build-and-license-check']
@@ -201,6 +198,10 @@ jobs:
- name: Pulsar IO
group: PULSAR_IO
timeout: 75
+ - name: Pulsar IO - Elastic Search
+ group: PULSAR_IO_ELASTIC
+ - name: Pulsar IO - Kafka Connect Adaptor
+ group: PULSAR_IO_KAFKA_CONNECT
- name: Pulsar Client
group: CLIENT
@@ -211,11 +212,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -391,6 +387,8 @@ jobs:
timeout-minutes: 60
needs: ['preconditions', 'build-and-license-check']
if: ${{ needs.preconditions.outputs.docs_only != 'true'}}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -398,11 +396,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -469,6 +462,7 @@ jobs:
env:
JOB_NAME: CI - Integration - ${{ matrix.name }}
PULSAR_TEST_IMAGE_NAME: apachepulsar/java-test-image:latest
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -513,11 +507,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -721,11 +710,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Install gh-actions-artifact-client.js
uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master
@@ -739,6 +723,8 @@ jobs:
timeout-minutes: 60
needs: ['preconditions', 'build-and-license-check']
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -746,11 +732,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -798,6 +779,7 @@ jobs:
# build docker image
# include building of Pulsar SQL, Connectors, Offloaders and server distros
mvn -B -am -pl pulsar-sql/presto-distribution,distribution/io,distribution/offloaders,distribution/server,distribution/shell,tests/docker-images/latest-version-image install \
+ -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \
-Pmain,docker -Dmaven.test.skip=true -Ddocker.squash=true \
-Dspotbugs.skip=true -Dlicense.skip=true -Dcheckstyle.skip=true -Drat.skip=true
@@ -850,6 +832,7 @@ jobs:
env:
JOB_NAME: CI - System - ${{ matrix.name }}
PULSAR_TEST_IMAGE_NAME: apachepulsar/pulsar-test-latest-version:latest
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -885,11 +868,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -1080,6 +1058,7 @@ jobs:
env:
JOB_NAME: CI Flaky - System - ${{ matrix.name }}
PULSAR_TEST_IMAGE_NAME: apachepulsar/pulsar-test-latest-version:latest
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -1097,11 +1076,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -1207,11 +1181,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Install gh-actions-artifact-client.js
uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master
@@ -1225,6 +1194,8 @@ jobs:
timeout-minutes: 120
needs: ['preconditions', 'integration-tests']
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -1232,11 +1203,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Cache Maven dependencies
uses: actions/cache@v3
timeout-minutes: 5
@@ -1263,6 +1229,8 @@ jobs:
timeout-minutes: 120
needs: [ 'preconditions', 'integration-tests' ]
if: ${{ needs.preconditions.outputs.need_owasp == 'true' }}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -1270,11 +1238,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -1310,8 +1273,10 @@ jobs:
cd $HOME
$GITHUB_WORKSPACE/build/pulsar_ci_tool.sh restore_tar_from_github_actions_artifacts pulsar-maven-repository-binaries
# Projects dependent on flume, hdfs, hbase, and presto currently excluded from the scan.
- - name: run "clean verify" to trigger dependency check
- run: mvn -q -B -ntp verify -PskipDocker,owasp-dependency-check -DskipTests -pl '!pulsar-sql,!distribution/io,!distribution/offloaders,!tiered-storage/file-system,!pulsar-io/flume,!pulsar-io/hbase,!pulsar-io/hdfs2,!pulsar-io/hdfs3,!pulsar-io/docs,!pulsar-io/jdbc/openmldb'
+ - name: trigger dependency check
+ run: |
+ mvn -B -ntp verify -PskipDocker,skip-all,owasp-dependency-check -Dcheckstyle.skip=true -DskipTests \
+ -pl '!pulsar-sql,!distribution/server,!distribution/io,!distribution/offloaders,!pulsar-sql/presto-distribution,!tiered-storage/file-system,!pulsar-io/flume,!pulsar-io/hbase,!pulsar-io/hdfs2,!pulsar-io/hdfs3,!pulsar-io/docs,!pulsar-io/jdbc/openmldb'
- name: Upload report
uses: actions/upload-artifact@v3
diff --git a/.gitignore b/.gitignore
index c584baaa0a0b8..cd00c44200059 100644
--- a/.gitignore
+++ b/.gitignore
@@ -97,4 +97,3 @@ test-reports/
# Gradle Enterprise
.mvn/.gradle-enterprise/
-.mvn/extensions.xml
diff --git a/.idea/icon.svg b/.idea/icon.svg
new file mode 100644
index 0000000000000..bf9b232def4a4
--- /dev/null
+++ b/.idea/icon.svg
@@ -0,0 +1,33 @@
+
+
+
+
\ No newline at end of file
diff --git a/.mvn/ge-extensions.xml b/.mvn/extensions.xml
similarity index 97%
rename from .mvn/ge-extensions.xml
rename to .mvn/extensions.xml
index d462c11389b17..872764f899827 100644
--- a/.mvn/ge-extensions.xml
+++ b/.mvn/extensions.xml
@@ -24,7 +24,7 @@
com.gradlegradle-enterprise-maven-extension
- 1.16.2
+ 1.17.1com.gradle
diff --git a/README.md b/README.md
index 15419a754cfa9..fdbf7c7339b1e 100644
--- a/README.md
+++ b/README.md
@@ -254,7 +254,7 @@ Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/L
## Crypto Notice
-This distribution includes cryptographic software. The country in which you currently reside may have restrictions on the import, possession, use, and/or re-export to another country, of encryption software. BEFORE using any encryption software, please check your country's laws, regulations and policies concerning the import, possession, or use, and re-export of encryption software, to see if this is permitted. See for more information.
+This distribution includes cryptographic software. The country in which you currently reside may have restrictions on the import, possession, use, and/or re-export to another country, of encryption software. BEFORE using any encryption software, please check your country's laws, regulations and policies concerning the import, possession, or use, and re-export of encryption software, to see if this is permitted. See [The Wassenaar Arrangement](http://www.wassenaar.org/) for more information.
The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS), has classified this software as Export Commodity Control Number (ECCN) 5D002.C.1, which includes information security software using or performing cryptographic functions with asymmetric algorithms. The form and manner of this Apache Software Foundation distribution makes it eligible for export under the License Exception ENC Technology Software Unrestricted (TSU) exception (see the BIS Export Administration Regulations, Section 740.13) for both object code and source code.
diff --git a/bin/bookkeeper b/bin/bookkeeper
index fb516a98acdc2..0cc07dd49aba5 100755
--- a/bin/bookkeeper
+++ b/bin/bookkeeper
@@ -168,7 +168,7 @@ OPTS="$OPTS -Dlog4j.configurationFile=`basename $BOOKIE_LOG_CONF`"
# Allow Netty to use reflection access
OPTS="$OPTS -Dio.netty.tryReflectionSetAccessible=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
diff --git a/bin/function-localrunner b/bin/function-localrunner
index 45a37cb306794..2e0aa0f6dffe2 100755
--- a/bin/function-localrunner
+++ b/bin/function-localrunner
@@ -40,13 +40,15 @@ PULSAR_MEM=${PULSAR_MEM:-"-Xmx128m -XX:MaxDirectMemorySize=128m"}
PULSAR_GC=${PULSAR_GC:-"-XX:+UseZGC -XX:+PerfDisableSharedMem -XX:+AlwaysPreTouch"}
# Garbage collection log.
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
-# java version has space, use [[ -n $PARAM ]] to judge if variable exists
-if [[ -n "$IS_JAVA_8" ]]; then
- PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xloggc:logs/pulsar_gc_%p.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=20M"}
-else
-# After jdk 9, gc log param should config like this. Ignoring version less than jdk 8
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
+if [[ -z "$IS_JAVA_8" ]]; then
+ # >= JDK 9
PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xlog:gc:logs/pulsar_gc_%p.log:time,uptime:filecount=10,filesize=20M"}
+ # '--add-opens' option is not supported in JDK 1.8
+ OPTS="$OPTS --add-opens java.base/sun.net=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED"
+else
+ # == JDK 1.8
+ PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xloggc:logs/pulsar_gc_%p.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=20M"}
fi
# Extra options to be passed to the jvm
diff --git a/bin/pulsar b/bin/pulsar
index a033de947d4b3..20ed1f7f22b0f 100755
--- a/bin/pulsar
+++ b/bin/pulsar
@@ -291,7 +291,7 @@ OPTS="$OPTS -Dzookeeper.clientTcpKeepAlive=true"
# Allow Netty to use reflection access
OPTS="$OPTS -Dio.netty.tryReflectionSetAccessible=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
@@ -307,6 +307,8 @@ if [[ -z "$IS_JAVA_8" ]]; then
OPTS="$OPTS --add-opens java.management/sun.management=ALL-UNNAMED"
# MBeanStatsGenerator
OPTS="$OPTS --add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED"
+ # LinuxInfoUtils
+ OPTS="$OPTS --add-opens java.base/jdk.internal.platform=ALL-UNNAMED"
fi
OPTS="-cp $PULSAR_CLASSPATH $OPTS"
diff --git a/bin/pulsar-admin-common.sh b/bin/pulsar-admin-common.sh
index 8223ac5b3bf24..8aa21c00f634d 100755
--- a/bin/pulsar-admin-common.sh
+++ b/bin/pulsar-admin-common.sh
@@ -91,7 +91,7 @@ PULSAR_CLASSPATH="`dirname $PULSAR_LOG_CONF`:$PULSAR_CLASSPATH"
OPTS="$OPTS -Dlog4j.configurationFile=`basename $PULSAR_LOG_CONF`"
OPTS="$OPTS -Djava.net.preferIPv4Stack=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
diff --git a/bin/pulsar-perf b/bin/pulsar-perf
index 47c02bc3d67d5..bdc1dc1ed8b8c 100755
--- a/bin/pulsar-perf
+++ b/bin/pulsar-perf
@@ -134,7 +134,7 @@ PULSAR_CLASSPATH="$PULSAR_JAR:$PULSAR_CLASSPATH:$PULSAR_EXTRA_CLASSPATH"
PULSAR_CLASSPATH="`dirname $PULSAR_LOG_CONF`:$PULSAR_CLASSPATH"
OPTS="$OPTS -Dlog4j.configurationFile=`basename $PULSAR_LOG_CONF` -Djava.net.preferIPv4Stack=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
diff --git a/bouncy-castle/bc/pom.xml b/bouncy-castle/bc/pom.xml
index 0e2f8db659002..d5882b4659528 100644
--- a/bouncy-castle/bc/pom.xml
+++ b/bouncy-castle/bc/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarbouncy-castle-parent
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/bouncy-castle/bcfips-include-test/pom.xml b/bouncy-castle/bcfips-include-test/pom.xml
index 41ea4590165fe..e8348be9292cd 100644
--- a/bouncy-castle/bcfips-include-test/pom.xml
+++ b/bouncy-castle/bcfips-include-test/pom.xml
@@ -24,7 +24,7 @@
org.apache.pulsarbouncy-castle-parent
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
@@ -85,6 +85,28 @@
true
+
+ maven-resources-plugin
+
+
+ copy-resources
+ test-compile
+
+ copy-resources
+
+
+ ${project.build.testOutputDirectory}/certificate-authority
+ true
+
+
+ ${project.parent.parent.basedir}/tests/certificate-authority
+ false
+
+
+
+
+
+
diff --git a/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java b/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java
index 330d4fbc06897..e8e12838defef 100644
--- a/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java
+++ b/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java
@@ -37,11 +37,6 @@
import org.testng.annotations.BeforeMethod;
public class TlsProducerConsumerBase extends ProducerConsumerBase {
- protected final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem";
- protected final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem";
- protected final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem";
- protected final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem";
- protected final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem";
private final String clusterName = "use";
@BeforeMethod(alwaysRun = true)
@@ -63,9 +58,9 @@ protected void cleanup() throws Exception {
protected void internalSetUpForBroker() throws Exception {
conf.setBrokerServicePortTls(Optional.of(0));
conf.setWebServicePortTls(Optional.of(0));
- conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH);
- conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH);
- conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH);
+ conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH);
+ conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH);
+ conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH);
conf.setClusterName(clusterName);
conf.setTlsRequireTrustedClientCertOnConnect(true);
Set tlsProtocols = Sets.newConcurrentHashSet();
@@ -81,12 +76,12 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl)
}
ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(lookupUrl)
- .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false)
+ .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false)
.operationTimeout(1000, TimeUnit.MILLISECONDS);
if (addCertificates) {
Map authParams = new HashMap<>();
- authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
- authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+ authParams.put("tlsCertFile", getTlsFileForClient("admin.cert"));
+ authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8"));
clientBuilder.authentication(AuthenticationTls.class.getName(), authParams);
}
pulsarClient = clientBuilder.build();
@@ -94,15 +89,15 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl)
protected void internalSetUpForNamespace() throws Exception {
Map authParams = new HashMap<>();
- authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
- authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+ authParams.put("tlsCertFile", getTlsFileForClient("admin.cert"));
+ authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8"));
if (admin != null) {
admin.close();
}
admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString())
- .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false)
+ .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false)
.authentication(AuthenticationTls.class.getName(), authParams).build());
admin.clusters().createCluster(clusterName, ClusterData.builder()
.serviceUrl(brokerUrl.toString())
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem
deleted file mode 100644
index e9be840d3a083..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem
+++ /dev/null
@@ -1,72 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number:
- 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:05
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN = CARoot
- Validity
- Not Before: May 30 13:38:24 2022 GMT
- Not After : May 27 13:38:24 2032 GMT
- Subject: C = US, ST = CA, O = Apache, OU = Apache Pulsar, CN = localhost
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- RSA Public-Key: (2048 bit)
- Modulus:
- 00:af:bf:b7:2d:98:ad:9d:f6:da:a3:13:d4:62:0f:
- 98:be:1c:a2:89:22:ba:6f:d5:fd:1f:67:e3:91:03:
- 98:80:81:0e:ed:d8:f6:70:7f:2c:36:68:3d:53:ea:
- 58:3a:a6:d5:89:66:4b:bd:1e:57:71:13:6d:4b:11:
- e5:40:a5:76:84:24:92:40:58:80:96:c9:1f:2c:c4:
- 55:eb:a3:79:73:70:5c:37:9a:89:ed:2f:ba:6b:e3:
- 82:7c:69:4a:02:54:8b:81:5e:3c:bf:4c:8a:cb:ea:
- 2c:5e:83:e7:b7:10:08:5f:82:58:a3:89:d1:da:92:
- ba:2a:28:ee:30:28:3f:5b:ae:10:71:96:c7:e1:12:
- c5:b0:1a:ad:44:6f:44:3a:11:4a:9a:3c:0f:8d:06:
- 80:7b:34:ef:3f:6c:f4:5e:c5:44:54:1e:c8:dd:c7:
- 80:85:80:d9:68:e6:c6:53:03:77:e1:fe:18:61:07:
- 77:05:4c:ed:59:bc:5d:41:38:6a:ef:5d:a1:b2:60:
- 98:d4:48:28:95:02:8a:0e:fd:cf:7b:1b:d2:11:cc:
- 10:0c:50:73:d7:cc:38:6c:83:dd:79:26:aa:90:c8:
- 9b:84:86:bc:59:e9:62:69:f4:98:1b:c4:80:78:7e:
- a0:1a:81:9d:d2:e1:66:dd:c4:cc:fc:63:04:ac:ec:
- a7:35
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Subject Alternative Name:
- DNS:localhost, IP Address:127.0.0.1
- Signature Algorithm: sha256WithRSAEncryption
- 88:1d:a7:42:a1:1c:87:45:4a:e6:5e:aa:9c:7b:71:2e:5c:9e:
- 11:85:0f:a3:c5:b4:ea:73:9e:b7:61:9d:4a:e9:cd:1a:c5:2e:
- 03:be:a3:2b:b6:12:6a:15:03:04:3f:fb:4a:09:0d:84:0e:dd:
- c0:63:2b:0f:13:fb:1f:98:64:49:48:e7:96:d5:41:c4:ca:94:
- bf:ab:c5:ea:80:2c:ee:1f:ab:12:54:74:f1:f1:56:ea:03:c0:
- 1c:0d:8d:b9:6e:b0:d0:5f:21:c1:d3:e3:45:df:cf:64:69:13:
- 6c:54:79:06:7d:53:46:77:3c:21:cc:c4:6a:5f:f9:9a:07:0f:
- a5:95:20:f0:0e:93:07:48:96:a9:2c:28:50:21:d7:f8:13:4f:
- b8:ca:aa:1f:a6:41:7c:71:1f:ad:11:3f:3d:1e:e9:81:3c:86:
- c1:af:2d:39:a0:13:9f:99:ec:9a:47:44:df:28:02:a7:1d:6a:
- 8d:c0:1e:24:e8:19:fc:1d:dc:67:29:04:be:0a:d6:c5:81:59:
- 27:2c:f5:e5:df:ba:0b:c6:50:e5:b3:bd:73:12:3e:2c:ef:a6:
- 8a:ed:eb:86:9a:45:45:52:a3:44:78:12:60:17:e2:3a:32:92:
- 03:6e:89:89:16:c5:e0:bc:be:a7:cb:93:4b:d8:56:33:a0:a0:
- 53:b2:0d:a5
------BEGIN CERTIFICATE-----
-MIIDFDCCAfygAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgUwDQYJKoZIhvcNAQEL
-BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz
-MzgyNFowVzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQ8wDQYDVQQKEwZBcGFj
-aGUxFjAUBgNVBAsTDUFwYWNoZSBQdWxzYXIxEjAQBgNVBAMTCWxvY2FsaG9zdDCC
-ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK+/ty2YrZ322qMT1GIPmL4c
-ookium/V/R9n45EDmICBDu3Y9nB/LDZoPVPqWDqm1YlmS70eV3ETbUsR5UCldoQk
-kkBYgJbJHyzEVeujeXNwXDeaie0vumvjgnxpSgJUi4FePL9MisvqLF6D57cQCF+C
-WKOJ0dqSuioo7jAoP1uuEHGWx+ESxbAarURvRDoRSpo8D40GgHs07z9s9F7FRFQe
-yN3HgIWA2WjmxlMDd+H+GGEHdwVM7Vm8XUE4au9dobJgmNRIKJUCig79z3sb0hHM
-EAxQc9fMOGyD3XkmqpDIm4SGvFnpYmn0mBvEgHh+oBqBndLhZt3EzPxjBKzspzUC
-AwEAAaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB
-CwUAA4IBAQCIHadCoRyHRUrmXqqce3EuXJ4RhQ+jxbTqc563YZ1K6c0axS4DvqMr
-thJqFQMEP/tKCQ2EDt3AYysPE/sfmGRJSOeW1UHEypS/q8XqgCzuH6sSVHTx8Vbq
-A8AcDY25brDQXyHB0+NF389kaRNsVHkGfVNGdzwhzMRqX/maBw+llSDwDpMHSJap
-LChQIdf4E0+4yqofpkF8cR+tET89HumBPIbBry05oBOfmeyaR0TfKAKnHWqNwB4k
-6Bn8HdxnKQS+CtbFgVknLPXl37oLxlDls71zEj4s76aK7euGmkVFUqNEeBJgF+I6
-MpIDbomJFsXgvL6ny5NL2FYzoKBTsg2l
------END CERTIFICATE-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem
deleted file mode 100644
index 004bf8e21a7a9..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCvv7ctmK2d9tqj
-E9RiD5i+HKKJIrpv1f0fZ+ORA5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21L
-EeVApXaEJJJAWICWyR8sxFXro3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixe
-g+e3EAhfglijidHakroqKO4wKD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/
-bPRexURUHsjdx4CFgNlo5sZTA3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO
-/c97G9IRzBAMUHPXzDhsg915JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8
-YwSs7Kc1AgMBAAECggEAAaWEK9MwXTiA1+JJrRmETtOp2isPIBkbI/4vLZ6hASM0
-ZpoPxQIMAf58BJs/dF03xu/EaeMs4oxSC9ABG9fxAk/tZtjta3w65Ip6W5jOfHxj
-AMpb3HMEBhq9kDjUTq1IGVAutYQcEMkC3WfS9e4ahfqMpguWgbu6LsbvZFgcL9mv
-pGnKv9YVe6Xk6isvqtq6G1af0rd7c//xF0i0e/qEo83Buok3gLEZOELZbcRxjUYc
-jnyglnXnwkGjuL4E3wgS3l73ZKsb6+AYoqhMPVz8t4/PN3tTrsBJKOSYo8KzIm0U
-ek9T8XmPbP0cuheRxp9Dp8TXJJQZK0N9jz+EL0ogQQKBgQDnavm8GpR4pap9cDOc
-+YI5s823b507pNdSU8elO9gLsP0JlFzv+sqghVko29r85D7Vn3MkgYTy0S4ANLCs
-0NFDY8N2QH6U1dTkk1QXZydVZDuKJ5SSpC4v+Vafl8yDxhB4Nlxhbm9vJEMfLcXh
-2kL6UlAuFDtYD0AdczwnHu5DjQKBgQDCauocm55FpcyDMMBO2CjurxcjBYS3S1xT
-Bz+sPtxJLjlKbAt8kSHUQcCcX9zhrQBfsT38LATCmKaOFqUW5/PPh2LcrxiMqlL1
-OJBUJ3Te2LTjlUn8r+DHv/69UIh5tchwRr3YgB0DuIs7jfmr4VfiOWTBtPVhoGFR
-1Wt60j30SQKBgHzreS26J2VNAFBALgxRf6OIVMbtgDG/FOCDCyU9vazp+F2gcd61
-QYYPFYcBzx9uUiDctroBFHRCyJMh3jEbc6ruAogl3m6XUxmkEeOkMk5dEerM3N2f
-tLL+5Gy385U6aI+LwKhzhcG4EGeXPNdjC362ykNldnddnB2Jo/H2N2XNAoGAdnft
-xpbxP+GDGKIZXTIM5zzcLWQMdiC+1n1BSHVZiGJZWMczzKknYw7aDq+/iekApE79
-xW8RS373ZvfXi3i2Mcx+6pjrrbOQL4tTL2SHq8+DknaDCi4mG7IbyUKMlxW1WO1S
-e929UGogtZ6S+DCte9WbVwosyFuRUetpvgLk67kCgYBWetihZjgBWrqVYT24TTRH
-KxzSzH1JgzzF9qgTdlhXDv9hC+Kc0uTKsgViesDqVuCOjkwzY5OQr9c6duO0fwwP
-qNk/qltdgjMC5iiv7duyukfbEuqKEdGGer9HFb7en96dZdVQJpYHaaslAGurtD80
-ejCQZgzR2XaHSuIQb0IUVQ==
------END PRIVATE KEY-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem
deleted file mode 100644
index 21bbaba213f69..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem
+++ /dev/null
@@ -1,77 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number:
- 70:4c:6b:e0:aa:cc:01:77:f2:1f:04:8c:d4:72:03:a5:32:5f:c7:be
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN = CARoot
- Validity
- Not Before: May 30 13:38:24 2022 GMT
- Not After : May 27 13:38:24 2032 GMT
- Subject: CN = CARoot
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- RSA Public-Key: (2048 bit)
- Modulus:
- 00:dc:9c:01:30:5f:c5:42:48:10:78:30:5d:66:20:
- 0e:74:61:f6:82:74:9f:6f:b2:ed:00:9e:6c:21:b6:
- 83:21:6b:54:34:e8:a9:dc:81:83:7a:0e:9f:cc:3d:
- eb:97:ee:cf:ca:0e:5f:96:81:dc:e7:75:88:91:2f:
- d5:65:74:c2:d8:67:58:d8:41:6a:5f:a9:79:dc:29:
- 36:4a:b8:39:20:d2:f8:a8:59:9f:e3:be:f9:61:80:
- 1b:ce:63:bb:12:56:06:b9:77:4e:6a:40:65:9b:bf:
- 5b:f8:27:88:f5:ff:40:ee:47:bc:2d:8e:c3:a6:62:
- 0d:18:76:d1:f5:af:1a:6b:25:4e:d4:55:15:f0:e3:
- 97:1b:68:eb:75:b8:80:ea:64:ef:7e:e2:f0:5c:da:
- 6d:d6:16:7b:0f:5e:ae:72:47:5a:df:0b:8a:e0:74:
- c1:b7:82:0d:97:41:d7:84:16:51:40:37:15:a1:eb:
- 70:0c:f1:5a:26:39:11:1e:97:b9:36:32:ce:16:b9:
- 42:ad:31:5b:1e:89:f5:3e:07:0e:d6:fc:9a:46:8e:
- 87:89:90:5c:f3:00:e4:9b:ce:7b:93:fe:9a:d8:65:
- ec:49:5c:e8:eb:41:3d:53:bc:ce:e8:6d:44:ec:76:
- 3f:e6:9b:13:e4:f8:d0:1c:00:e6:4f:73:e1:b0:27:
- 6f:99
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Subject Key Identifier:
- 8B:30:D2:81:7C:BE:AB:4D:76:37:19:2B:69:5E:DB:F7:81:95:73:F5
- X509v3 Authority Key Identifier:
- keyid:8B:30:D2:81:7C:BE:AB:4D:76:37:19:2B:69:5E:DB:F7:81:95:73:F5
-
- X509v3 Basic Constraints: critical
- CA:TRUE
- Signature Algorithm: sha256WithRSAEncryption
- 02:4c:80:4f:a4:b5:f4:70:be:82:cf:3a:ed:40:f9:97:17:22:
- 07:5d:e0:9b:4e:54:f8:4b:64:99:f5:07:7f:87:5b:9c:60:ec:
- 9f:69:e6:00:97:5a:cd:14:59:31:45:be:b7:bd:c4:ce:57:82:
- 1a:4a:62:ce:8e:c8:59:d5:62:43:8b:94:c0:ab:c2:cc:3a:a0:
- 69:d3:65:15:82:35:de:85:64:e6:7b:d9:3a:22:12:77:f7:71:
- 82:86:d7:6c:e5:69:d5:3a:f2:a7:25:f7:dc:f3:6f:cb:eb:85:
- 48:44:63:e2:6d:3c:82:eb:3a:c0:e1:bd:9d:3a:12:11:66:1f:
- 05:8f:49:65:31:d6:cf:26:06:46:ba:73:c7:ad:61:fc:14:5f:
- 68:d1:ee:02:5f:4b:98:b6:5b:0c:98:4e:61:7b:cb:35:ee:44:
- a1:ce:e1:00:a2:56:f0:0d:72:3b:58:66:e8:9a:dc:62:d5:95:
- 3e:5a:48:21:a8:7c:f8:1f:5a:13:db:53:33:11:3e:e6:14:39:
- cd:2b:3f:77:5b:ee:f7:0c:59:69:2f:46:9a:34:56:89:05:8e:
- 40:94:94:3f:95:f6:fa:f9:1a:e8:1a:80:7b:1d:f7:0c:a1:be:
- e2:38:98:fd:0f:e7:68:4d:7d:fe:ae:5f:e3:32:c6:5d:37:77:
- 7a:28:ce:cc
------BEGIN CERTIFICATE-----
-MIIDAzCCAeugAwIBAgIUcExr4KrMAXfyHwSM1HIDpTJfx74wDQYJKoZIhvcNAQEL
-BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz
-MzgyNFowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEA3JwBMF/FQkgQeDBdZiAOdGH2gnSfb7LtAJ5sIbaDIWtUNOip3IGD
-eg6fzD3rl+7Pyg5floHc53WIkS/VZXTC2GdY2EFqX6l53Ck2Srg5INL4qFmf4775
-YYAbzmO7ElYGuXdOakBlm79b+CeI9f9A7ke8LY7DpmINGHbR9a8aayVO1FUV8OOX
-G2jrdbiA6mTvfuLwXNpt1hZ7D16uckda3wuK4HTBt4INl0HXhBZRQDcVoetwDPFa
-JjkRHpe5NjLOFrlCrTFbHon1PgcO1vyaRo6HiZBc8wDkm857k/6a2GXsSVzo60E9
-U7zO6G1E7HY/5psT5PjQHADmT3PhsCdvmQIDAQABo1MwUTAdBgNVHQ4EFgQUizDS
-gXy+q012NxkraV7b94GVc/UwHwYDVR0jBBgwFoAUizDSgXy+q012NxkraV7b94GV
-c/UwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAAkyAT6S19HC+
-gs867UD5lxciB13gm05U+EtkmfUHf4dbnGDsn2nmAJdazRRZMUW+t73EzleCGkpi
-zo7IWdViQ4uUwKvCzDqgadNlFYI13oVk5nvZOiISd/dxgobXbOVp1TrypyX33PNv
-y+uFSERj4m08gus6wOG9nToSEWYfBY9JZTHWzyYGRrpzx61h/BRfaNHuAl9LmLZb
-DJhOYXvLNe5Eoc7hAKJW8A1yO1hm6JrcYtWVPlpIIah8+B9aE9tTMxE+5hQ5zSs/
-d1vu9wxZaS9GmjRWiQWOQJSUP5X2+vka6BqAex33DKG+4jiY/Q/naE19/q5f4zLG
-XTd3eijOzA==
------END CERTIFICATE-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem
deleted file mode 100644
index e5d9e6e74b233..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem
+++ /dev/null
@@ -1,72 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number:
- 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:06
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN = CARoot
- Validity
- Not Before: May 30 13:38:24 2022 GMT
- Not After : May 27 13:38:24 2032 GMT
- Subject: C = US, ST = CA, O = Apache, OU = Apache Pulsar, CN = superUser
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- RSA Public-Key: (2048 bit)
- Modulus:
- 00:cd:43:7d:98:40:f9:b0:5b:bc:ae:db:c0:0b:ad:
- 26:90:96:e0:62:38:ed:68:b1:70:46:3b:de:44:f9:
- 14:51:86:10:eb:ca:90:e7:88:e8:f9:91:85:e0:dd:
- b5:b4:14:b9:78:e3:86:d5:54:6d:68:ec:14:92:b4:
- f8:22:5b:05:3d:ed:31:25:65:08:05:84:ca:e6:0c:
- 21:12:58:32:c7:1a:60:a3:4f:d2:4a:9e:28:19:7c:
- 45:84:00:8c:89:dc:de:8a:e5:4f:88:91:cc:a4:f1:
- 81:45:4c:7d:c2:ff:e2:c1:89:c6:12:73:95:e2:36:
- bd:db:ae:8b:5a:68:6a:90:51:de:2b:88:5f:aa:67:
- f4:a8:e3:63:dc:be:19:82:cc:9d:7f:e6:8d:fb:82:
- be:22:01:3d:56:13:3b:5b:04:b4:e8:c5:18:e6:2e:
- 0d:fa:ba:4a:8d:e8:c6:5a:a1:51:9a:4a:62:d7:af:
- dd:b4:fc:e2:d5:cd:ae:99:6c:5c:61:56:0b:d7:0c:
- 1a:77:5c:f5:3a:6a:54:b5:9e:33:ac:a9:75:28:9a:
- 76:af:d0:7a:57:00:1b:91:13:31:fd:42:88:21:47:
- 05:10:01:2f:59:bb:c7:3a:d9:e1:58:4c:1b:6c:71:
- b6:98:ef:dd:03:82:58:a3:32:dc:90:a1:b6:a6:1e:
- e1:0b
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Subject Alternative Name:
- DNS:localhost, IP Address:127.0.0.1
- Signature Algorithm: sha256WithRSAEncryption
- 90:62:ba:7b:6f:45:95:7a:71:2f:e7:88:0c:64:b8:6c:05:86:
- 7f:47:08:ce:d6:e2:5a:32:13:0c:82:ad:a7:af:f0:a2:f7:86:
- 79:87:1a:89:78:95:b1:9f:be:c5:8b:39:fd:12:94:b6:e1:69:
- ff:fa:1e:c3:82:d8:6c:03:80:45:ac:1c:06:70:bb:77:c3:41:
- 5f:b6:9d:fe:36:6f:ae:23:6c:bf:43:79:8e:74:85:8e:96:89:
- a9:c4:6d:d9:fa:05:ba:a8:11:7c:82:45:94:3d:9f:b6:7c:2f:
- 4e:6d:37:c3:fb:79:7e:0c:d2:15:fa:0e:ea:2d:c9:24:f3:34:
- 13:6f:db:d7:55:e1:0c:2f:7e:fe:4c:3b:fa:7e:03:26:0f:6a:
- 95:d2:22:ce:27:71:6a:97:ac:36:0a:20:ec:19:a0:78:23:0c:
- 54:f3:b1:dd:33:36:7c:b7:61:23:70:8f:7f:c8:5f:e8:9e:b5:
- 02:31:4d:b3:40:b0:7b:b2:ee:14:a7:69:22:8b:38:85:5d:04:
- 6e:d5:44:41:31:a7:4b:71:86:fb:81:cd:3d:db:96:23:0b:bc:
- e1:67:46:0e:87:86:91:4e:1a:35:37:af:a4:ac:9a:de:e3:4f:
- 82:47:f1:c4:16:58:11:8f:76:d2:4d:df:a1:c6:a2:8f:33:6d:
- 72:15:28:76
------BEGIN CERTIFICATE-----
-MIIDFDCCAfygAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgYwDQYJKoZIhvcNAQEL
-BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz
-MzgyNFowVzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQ8wDQYDVQQKEwZBcGFj
-aGUxFjAUBgNVBAsTDUFwYWNoZSBQdWxzYXIxEjAQBgNVBAMTCXN1cGVyVXNlcjCC
-ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM1DfZhA+bBbvK7bwAutJpCW
-4GI47WixcEY73kT5FFGGEOvKkOeI6PmRheDdtbQUuXjjhtVUbWjsFJK0+CJbBT3t
-MSVlCAWEyuYMIRJYMscaYKNP0kqeKBl8RYQAjInc3orlT4iRzKTxgUVMfcL/4sGJ
-xhJzleI2vduui1poapBR3iuIX6pn9KjjY9y+GYLMnX/mjfuCviIBPVYTO1sEtOjF
-GOYuDfq6So3oxlqhUZpKYtev3bT84tXNrplsXGFWC9cMGndc9TpqVLWeM6ypdSia
-dq/QelcAG5ETMf1CiCFHBRABL1m7xzrZ4VhMG2xxtpjv3QOCWKMy3JChtqYe4QsC
-AwEAAaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB
-CwUAA4IBAQCQYrp7b0WVenEv54gMZLhsBYZ/RwjO1uJaMhMMgq2nr/Ci94Z5hxqJ
-eJWxn77Fizn9EpS24Wn/+h7DgthsA4BFrBwGcLt3w0Fftp3+Nm+uI2y/Q3mOdIWO
-lompxG3Z+gW6qBF8gkWUPZ+2fC9ObTfD+3l+DNIV+g7qLckk8zQTb9vXVeEML37+
-TDv6fgMmD2qV0iLOJ3Fql6w2CiDsGaB4IwxU87HdMzZ8t2EjcI9/yF/onrUCMU2z
-QLB7su4Up2kiiziFXQRu1URBMadLcYb7gc0925YjC7zhZ0YOh4aRTho1N6+krJre
-40+CR/HEFlgRj3bSTd+hxqKPM21yFSh2
------END CERTIFICATE-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem
deleted file mode 100644
index 3835b3eacccc0..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDNQ32YQPmwW7yu
-28ALrSaQluBiOO1osXBGO95E+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSS
-tPgiWwU97TElZQgFhMrmDCESWDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFF
-TH3C/+LBicYSc5XiNr3brotaaGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1W
-EztbBLToxRjmLg36ukqN6MZaoVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1
-njOsqXUomnav0HpXABuREzH9QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQ
-obamHuELAgMBAAECggEBALGnokJuqiz7mTj2NSdl+6TVEOuyPbiJKpV/J4cm1XEh
-ye9qaTQcCRhH3UmcWrG75jM9KevloLRY8A1x1/lUMhtA+XJWGTU9k6a8BLut3nT4
-3X87jNTMQgSczEXNe9WudmZcxhN7rVVtOOdTpt1pP0cnCWna5HTf0D8cuLvM975j
-r1YGTjKsCF1W+tp6ZAIIMfJkUI2qBRKvSxVCSs1vZBraox3yUVnq9oRLHxZZoqOd
-d51G5phRtn6ReVPBdT8fGUBEGg3jKxTu2/vLQMUyHy0hyCAM20gzOP4FIc2g+QZU
-y42byAuc89m0OrdRWsmzHCOxcq9DwY9npaz1RscR/2ECgYEA9bHJQ0Y1afpS5gn2
-KnXenRIw9oal1utQZnohCEJ4um+K/BCEHtDnI825LPNf34IKM2rSmssvHrYN51o0
-92j9lHHXsf6MVluwsTsIu8MtNaJ1BLt96dub4ScGT6vvzObKTwsajUfIHk+FNsKq
-zps8yh1q0qyyfAcvR82+Xr6JIsMCgYEA1d+RHGewi/Ub/GCG99A1KFKsgbiIJnWB
-IFmrcyPWignhzDUcw2SV9XqAzeK8EOIHNq3e5U/tkA7aCWxtLb5UsQ8xvmwQY2cy
-X2XvSdIhO4K2PgRLgjlzZ8RHSULglqyjB2i6TjwjFl8TsRzYr6JlV6+2cMujw4Bl
-g3a8gz071BkCgYBLP7BMkmw5kRliqxph1sffg3rLhmG0eU2elTkYtoMTVqZSnRxZ
-89FW/eMBCWkLo2BMbyMhlalQ1qFbgh1GyTkhBdzx/uwsZtiu7021dAmcq6z7ThE6
-VrBfPPyJ2jcPon/DxbrUGnAIGILMSsLVlGYB4RCehZYEto6chz8O9Xw60QKBgCnd
-us1BqviqwZC04JbQJie/j09RbS2CIQXRJ9PBNzUMXCwaVYgWP5ivI1mqQcBYTqsw
-fAqNi+aAUcQ4emLS+Ec0vzsUclzTDbRJAv+DZ8f7fWtEcfeLAYFVldLMiaRVJRDF
-OnsoIII3mGY6TFyNQKNanS8VXfheQQDsFFjoera5AoGBALXYEXkESXpw4LT6qJFz
-ktQuTZDfS6LtR14/+NkYL9c5wBC4Otkg4bNbT8xGlUjethRfpkm8xRTB6zfC1/p/
-Cg6YU1cwqlkRurAhE3PEv1dCc1IDbzou8xnwqHrd6sGPDQmQ3aEtU5eJhDZKIZfx
-nQqPGK92+Jtne7+W1mFZooxs
------END PRIVATE KEY-----
diff --git a/bouncy-castle/bcfips/pom.xml b/bouncy-castle/bcfips/pom.xml
index 7feee8c27afd0..a07e5e19907f2 100644
--- a/bouncy-castle/bcfips/pom.xml
+++ b/bouncy-castle/bcfips/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarbouncy-castle-parent
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/bouncy-castle/pom.xml b/bouncy-castle/pom.xml
index 46fb1db1bc807..daefeb83b5371 100644
--- a/bouncy-castle/pom.xml
+++ b/bouncy-castle/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/build/build_java_test_image.sh b/build/build_java_test_image.sh
index 0747e6dacb82a..459bf26f98eff 100755
--- a/build/build_java_test_image.sh
+++ b/build/build_java_test_image.sh
@@ -27,5 +27,6 @@ if [[ "$(docker version -f '{{.Server.Experimental}}' 2>/dev/null)" == "true" ]]
SQUASH_PARAM="-Ddocker.squash=true"
fi
mvn -am -pl tests/docker-images/java-test-image -Pcore-modules,-main,integrationTests,docker \
+ -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \
-Dmaven.test.skip=true -DskipSourceReleaseAssembly=true -Dspotbugs.skip=true -Dlicense.skip=true $SQUASH_PARAM \
"$@" install
\ No newline at end of file
diff --git a/build/docker/Dockerfile b/build/docker/Dockerfile
index 7660325567748..674167b326378 100644
--- a/build/docker/Dockerfile
+++ b/build/docker/Dockerfile
@@ -17,7 +17,7 @@
# under the License.
#
-FROM ubuntu:20.04
+FROM ubuntu:22.04
# prepare the directory for pulsar related files
RUN mkdir /pulsar
@@ -80,7 +80,7 @@ RUN dpkg -i crowdin.deb
# Install PIP
RUN curl https://bootstrap.pypa.io/get-pip.py | python3 -
-RUN pip3 install pdoc
+RUN pip3 --no-cache-dir install pdoc
#
# Installation
ARG MAVEN_VERSION=3.6.3
diff --git a/build/pulsar_ci_tool.sh b/build/pulsar_ci_tool.sh
index 61199eda2c5d8..d946edd395789 100755
--- a/build/pulsar_ci_tool.sh
+++ b/build/pulsar_ci_tool.sh
@@ -46,7 +46,8 @@ function ci_print_thread_dumps() {
# runs maven
function _ci_mvn() {
- mvn -B -ntp "$@"
+ mvn -B -ntp -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \
+ "$@"
}
# runs OWASP Dependency Check for all projects
diff --git a/build/regenerate_certs_for_tests.sh b/build/regenerate_certs_for_tests.sh
index fb0274cc19316..9582a7496cd1d 100755
--- a/build/regenerate_certs_for_tests.sh
+++ b/build/regenerate_certs_for_tests.sh
@@ -34,7 +34,16 @@ function reissue_certificate() {
keyfile=$1
certfile=$2
openssl x509 -x509toreq -in $certfile -signkey $keyfile -out ${certfile}.csr
- openssl x509 -req -CA ca-cert.pem -CAkey ca-key -in ${certfile}.csr -text -outform pem -out $certfile -days 3650 -CAcreateserial -extfile <(printf "subjectAltName = DNS:localhost, IP:127.0.0.1")
+ openssl x509 -req -CA ca-cert.pem -CAkey ca-key -in ${certfile}.csr -text -outform pem -days 3650 -sha256 -CAcreateserial -extfile <(printf "subjectAltName = DNS:localhost, IP:127.0.0.1") > $certfile
+ rm ${certfile}.csr
+}
+
+function reissue_certificate_no_subject() {
+ keyfile=$1
+ certfile=$2
+ openssl x509 -x509toreq -in $certfile -signkey $keyfile -out ${certfile}.csr
+ openssl x509 -req -CA ca-cert.pem -CAkey ca-key -in ${certfile}.csr -text -outform pem -days 3650 -sha256 -CAcreateserial > $certfile
+ rm ${certfile}.csr
}
generate_ca
@@ -54,12 +63,10 @@ cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/Prox
reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-key.pem \
$ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem
-generate_ca
-cp ca-cert.pem $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem
-reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem \
- $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem
-reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem \
- $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem
+# Use $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/cacert.pem as trusted cert
+reissue_certificate_no_subject \
+ $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/no-subject-alt-key.pem \
+ $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/no-subject-alt-cert.pem
generate_ca
cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem
diff --git a/build/run_unit_group.sh b/build/run_unit_group.sh
index af3ce57d27bd2..69434b011b37e 100755
--- a/build/run_unit_group.sh
+++ b/build/run_unit_group.sh
@@ -179,7 +179,6 @@ function test_group_other() {
}
function test_group_pulsar_io() {
- $MVN_TEST_OPTIONS -pl kafka-connect-avro-converter-shaded clean install
echo "::group::Running pulsar-io tests"
mvn_test --install -Ppulsar-io-tests,-main
echo "::endgroup::"
@@ -189,6 +188,18 @@ function test_group_pulsar_io() {
echo "::endgroup::"
}
+function test_group_pulsar_io_elastic() {
+ echo "::group::Running elastic-search tests"
+ mvn_test --install -Ppulsar-io-elastic-tests,-main
+ echo "::endgroup::"
+}
+
+function test_group_pulsar_io_kafka_connect() {
+ echo "::group::Running Pulsar IO Kafka connect adaptor tests"
+ mvn_test --install -Ppulsar-io-kafka-connect-tests,-main
+ echo "::endgroup::"
+}
+
function list_test_groups() {
declare -F | awk '{print $NF}' | sort | grep -E '^test_group_' | sed 's/^test_group_//g' | tr '[:lower:]' '[:upper:]'
}
diff --git a/buildtools/pom.xml b/buildtools/pom.xml
index c2378fc2fb01a..329eb9de6b552 100644
--- a/buildtools/pom.xml
+++ b/buildtools/pom.xml
@@ -25,38 +25,39 @@
org.apacheapache
- 23
+ 29
+ org.apache.pulsarbuildtools
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOTjarPulsar Build Tools
- ${maven.build.timestamp}
+ 2023-05-03T02:53:27Z1.81.8
- 3.0.0-M3
+ 3.1.02.18.01.7.32
- 7.7.0
+ 7.7.13.114.1
- 3.4.08.373.1.2
- 4.1.86.Final
+ 4.1.93.Final4.2.3
- 31.0.1-jre
+ 32.0.0-jre1.10.12
- 1.32
+ 2.03.12.4
--add-opens java.base/jdk.internal.loader=ALL-UNNAMED
--add-opens java.base/java.lang=ALL-UNNAMED
+ --add-opens java.base/jdk.internal.platform=ALL-UNNAMED
@@ -175,18 +176,24 @@
listener
- org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier
+ org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.JacocoDumpListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier
${test.additional.args}
+
+
+ org.apache.maven.surefire
+ surefire-testng
+ ${surefire.version}
+
+ org.apache.maven.pluginsmaven-shade-plugin
- ${maven-shade-plugin.version}truetrue
@@ -254,7 +261,7 @@
org.apache.maven.wagonwagon-ssh-external
- 2.10
+ 3.5.3
diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java b/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java
index 627a4ec30547b..fe76a79b2c4ce 100644
--- a/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java
+++ b/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java
@@ -124,8 +124,7 @@ public void beforeInvocation(IInvokedMethod iInvokedMethod, ITestResult iTestRes
|| iTestNGMethod.isAfterTestConfiguration())) {
throw new FailFastSkipException("Skipped after failure since testFailFast system property is set.");
}
- }
- if (FAIL_FAST_KILLSWITCH_FILE != null && FAIL_FAST_KILLSWITCH_FILE.exists()) {
+ } else if (FAIL_FAST_KILLSWITCH_FILE != null && FAIL_FAST_KILLSWITCH_FILE.exists()) {
throw new FailFastSkipException("Skipped after failure since kill switch file exists.");
}
}
diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/JacocoDumpListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/JacocoDumpListener.java
new file mode 100644
index 0000000000000..2c49d5118ae52
--- /dev/null
+++ b/buildtools/src/main/java/org/apache/pulsar/tests/JacocoDumpListener.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.tests;
+
+import java.lang.management.ManagementFactory;
+import java.util.concurrent.TimeUnit;
+import javax.management.InstanceNotFoundException;
+import javax.management.MBeanServer;
+import javax.management.MBeanServerInvocationHandler;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import org.testng.IExecutionListener;
+import org.testng.ISuite;
+import org.testng.ISuiteListener;
+
+/**
+ * A TestNG listener that dumps Jacoco coverage data to file using the Jacoco JMX interface.
+ *
+ * This ensures that coverage data is dumped even if the shutdown sequence of the Test JVM gets stuck. Coverage
+ * data will be dumped every 2 minutes by default and once all test suites have been run.
+ * Each test class runs in its own suite when run with maven-surefire-plugin.
+ */
+public class JacocoDumpListener implements ISuiteListener, IExecutionListener {
+ private final MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
+ private final ObjectName jacocoObjectName;
+ private final JacocoProxy jacocoProxy;
+ private final boolean enabled;
+
+ private long lastDumpTime;
+
+ private static final long DUMP_INTERVAL_MILLIS = TimeUnit.SECONDS.toMillis(120);
+
+ public JacocoDumpListener() {
+ try {
+ jacocoObjectName = new ObjectName("org.jacoco:type=Runtime");
+ } catch (MalformedObjectNameException e) {
+ // this won't happen since the ObjectName is static and valid
+ throw new RuntimeException(e);
+ }
+ enabled = checkEnabled();
+ if (enabled) {
+ jacocoProxy = MBeanServerInvocationHandler.newProxyInstance(platformMBeanServer, jacocoObjectName,
+ JacocoProxy.class, false);
+ } else {
+ jacocoProxy = null;
+ }
+ lastDumpTime = System.currentTimeMillis();
+ }
+
+ private boolean checkEnabled() {
+ try {
+ platformMBeanServer.getObjectInstance(jacocoObjectName);
+ } catch (InstanceNotFoundException e) {
+ // jacoco jmx is not enabled
+ return false;
+ }
+ return true;
+ }
+
+ public void onFinish(ISuite suite) {
+ // dump jacoco coverage data to file using the Jacoco JMX interface if more than DUMP_INTERVAL_MILLIS has passed
+ // since the last dump
+ if (enabled && System.currentTimeMillis() - lastDumpTime > DUMP_INTERVAL_MILLIS) {
+ // dump jacoco coverage data to file using the Jacoco JMX interface
+ triggerJacocoDump();
+ }
+ }
+ @Override
+ public void onExecutionFinish() {
+ if (enabled) {
+ // dump jacoco coverage data to file using the Jacoco JMX interface when all tests have finished
+ triggerJacocoDump();
+ }
+ }
+
+ private void triggerJacocoDump() {
+ System.out.println("Dumping Jacoco coverage data to file...");
+ long start = System.currentTimeMillis();
+ jacocoProxy.dump(true);
+ lastDumpTime = System.currentTimeMillis();
+ System.out.println("Completed in " + (lastDumpTime - start) + "ms.");
+ }
+
+ public interface JacocoProxy {
+ void dump(boolean reset);
+ }
+}
diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java
index b3d70621843ca..2d1f1273272c5 100644
--- a/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java
+++ b/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java
@@ -44,20 +44,29 @@ public void onTestFailure(ITestResult result) {
if (!(result.getThrowable() instanceof SkipException)) {
System.out.format("!!!!!!!!! FAILURE-- %s.%s(%s)-------\n", result.getTestClass(),
result.getMethod().getMethodName(), Arrays.toString(result.getParameters()));
- }
- if (result.getThrowable() != null) {
- result.getThrowable().printStackTrace();
- if (result.getThrowable() instanceof ThreadTimeoutException) {
- System.out.println("====== THREAD DUMPS ======");
- System.out.println(ThreadDumpUtil.buildThreadDiagnosticString());
+ if (result.getThrowable() != null) {
+ result.getThrowable().printStackTrace();
+ if (result.getThrowable() instanceof ThreadTimeoutException) {
+ System.out.println("====== THREAD DUMPS ======");
+ System.out.println(ThreadDumpUtil.buildThreadDiagnosticString());
+ }
}
}
}
@Override
public void onTestSkipped(ITestResult result) {
- System.out.format("~~~~~~~~~ SKIPPED -- %s.%s(%s)-------\n", result.getTestClass(),
- result.getMethod().getMethodName(), Arrays.toString(result.getParameters()));
+ if (!(result.getThrowable() instanceof SkipException)) {
+ System.out.format("~~~~~~~~~ SKIPPED -- %s.%s(%s)-------\n", result.getTestClass(),
+ result.getMethod().getMethodName(), Arrays.toString(result.getParameters()));
+ if (result.getThrowable() != null) {
+ result.getThrowable().printStackTrace();
+ if (result.getThrowable() instanceof ThreadTimeoutException) {
+ System.out.println("====== THREAD DUMPS ======");
+ System.out.println(ThreadDumpUtil.buildThreadDiagnosticString());
+ }
+ }
+ }
}
@Override
diff --git a/buildtools/src/main/resources/pulsar/checkstyle.xml b/buildtools/src/main/resources/pulsar/checkstyle.xml
index b5141cc5eb51a..b3812ca8cccd7 100644
--- a/buildtools/src/main/resources/pulsar/checkstyle.xml
+++ b/buildtools/src/main/resources/pulsar/checkstyle.xml
@@ -148,7 +148,7 @@ page at http://checkstyle.sourceforge.net/config.html -->
diff --git a/buildtools/src/main/resources/pulsar/suppressions.xml b/buildtools/src/main/resources/pulsar/suppressions.xml
index 7c78988db3e90..57a01c60f6a27 100644
--- a/buildtools/src/main/resources/pulsar/suppressions.xml
+++ b/buildtools/src/main/resources/pulsar/suppressions.xml
@@ -38,7 +38,7 @@
-
+
diff --git a/conf/broker.conf b/conf/broker.conf
index 746ef9c6c3fdc..22ca71864e9cd 100644
--- a/conf/broker.conf
+++ b/conf/broker.conf
@@ -85,6 +85,7 @@ advertisedAddress=
# internalListenerName=
# Enable or disable the HAProxy protocol.
+# If true, the real IP addresses of consumers and producers can be obtained when getting topic statistics data.
haProxyProtocolEnabled=false
# Number of threads to config Netty Acceptor. Default is 1
@@ -549,13 +550,11 @@ delayedDeliveryTrackerFactoryClassName=org.apache.pulsar.broker.delayed.InMemory
# Control the tick time for when retrying on delayed delivery,
# affecting the accuracy of the delivery time compared to the scheduled time.
-# Note that this time is used to configure the HashedWheelTimer's tick time for the
-# InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory).
+# Note that this time is used to configure the HashedWheelTimer's tick time.
# Default is 1 second.
delayedDeliveryTickTimeMillis=1000
-# When using the InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory), whether
-# the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
+# Whether the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
# time by as much as the tickTimeMillis. This can reduce the overhead on the broker of maintaining the delayed index
# for a potentially very short time period. When true, messages will not be sent to consumer until the deliverAt time
# has passed, and they may be as late as the deliverAt time plus the tickTimeMillis for the topic plus the
@@ -571,15 +570,17 @@ delayedDeliveryMinIndexCountPerBucket=50000
# after reaching the max time step limitation, the snapshot segment will be cut off.
delayedDeliveryMaxTimeStepPerBucketSnapshotSegmentSeconds=300
+# The max number of delayed message indexes per bucket snapshot segment, -1 means no limitation
+# after reaching the max number limitation, the snapshot segment will be cut off.
+delayedDeliveryMaxIndexesPerBucketSnapshotSegment=5000
+
# The max number of delayed message index bucket,
# after reaching the max buckets limitation, the adjacent buckets will be merged.
-delayedDeliveryMaxNumBuckets=50
-
-# Enable share the delayed message index across subscriptions
-delayedDeliverySharedIndexEnabled=false
+# (disable with value -1)
+delayedDeliveryMaxNumBuckets=-1
# Size of the lookahead window to use when detecting if all the messages in the topic
-# have a fixed delay.
+# have a fixed delay for InMemoryDelayedDeliveryTracker (the default DelayedDeliveryTracker).
# Default is 50,000. Setting the lookahead window to 0 will disable the logic to handle
# fixed delays in messages in a different way.
delayedDeliveryFixedDelayDetectionLookahead=50000
@@ -680,6 +681,9 @@ tlsTrustCertsFilePath=
# though the cert will not be used for client authentication.
tlsAllowInsecureConnection=false
+# Whether the hostname is validated when the broker creates a TLS connection with other brokers
+tlsHostnameVerificationEnabled=false
+
# Specify the tls protocols the broker will use to negotiate during TLS handshake
# (a comma-separated list of protocol names).
# Examples:
@@ -899,6 +903,11 @@ saslJaasServerRoleTokenSignerSecretPath=
# If >0, it will reject all HTTP requests with bodies larged than the configured limit
httpMaxRequestSize=-1
+# The maximum size in bytes of the request header. Larger headers will allow for more and/or larger cookies plus larger
+# form content encoded in a URL. However, larger headers consume more memory and can make a server more vulnerable to
+# denial of service attacks.
+httpMaxRequestHeaderSize = 8192
+
# If true, the broker will reject all HTTP requests using the TRACE and TRACK verbs.
# This setting may be necessary if the broker is deployed into an environment that uses http port
# scanning and flags web servers allowing the TRACE method as insecure.
@@ -1055,6 +1064,9 @@ bookkeeperTLSTrustCertsFilePath=
# Tls cert refresh duration at bookKeeper-client in seconds (0 to disable check)
bookkeeperTlsCertFilesRefreshDurationSeconds=300
+# Whether the hostname is validated when the broker creates a TLS connection to a bookkeeper
+bookkeeper_tlsHostnameVerificationEnabled=false
+
# Enable/disable disk weight based placement. Default is false
bookkeeperDiskWeightBasedPlacementEnabled=false
@@ -1066,8 +1078,8 @@ bookkeeperExplicitLacIntervalInMills=0
# bookkeeperClientExposeStatsToPrometheus=false
# If bookkeeperClientExposeStatsToPrometheus is set to true, we can set bookkeeperClientLimitStatsLogging=true
-# to limit per_channel_bookie_client metrics. default is false
-# bookkeeperClientLimitStatsLogging=false
+# to limit per_channel_bookie_client metrics. default is true
+# bookkeeperClientLimitStatsLogging=true
### --- Managed Ledger --- ###
@@ -1308,6 +1320,9 @@ defaultNamespaceBundleSplitAlgorithm=range_equally_divide
# load shedding strategy, support OverloadShedder and ThresholdShedder, default is ThresholdShedder since 2.10.0
loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.ThresholdShedder
+# If enabled, when current usage < average usage - threshold, the broker with the highest load will be triggered to unload.
+lowerBoundarySheddingEnabled=false
+
# load balance placement strategy, support LeastLongTermMessageRate and LeastResourceUsageWithWeight
loadBalancerLoadPlacementStrategy=org.apache.pulsar.broker.loadbalance.impl.LeastLongTermMessageRate
@@ -1352,10 +1367,6 @@ loadBalancerBandwithOutResourceWeight=1.0
# It only takes effect in the ThresholdShedder strategy.
loadBalancerCPUResourceWeight=1.0
-# The heap memory usage weight when calculating new resource usage.
-# It only takes effect in the ThresholdShedder strategy.
-loadBalancerMemoryResourceWeight=1.0
-
# The direct memory usage weight when calculating new resource usage.
# It only takes effect in the ThresholdShedder strategy.
loadBalancerDirectMemoryResourceWeight=1.0
@@ -1367,6 +1378,91 @@ loadBalancerBundleUnloadMinThroughputThreshold=10
# Time to wait for the unloading of a namespace bundle
namespaceBundleUnloadingTimeoutMs=60000
+### --- Load balancer extension --- ###
+
+# Option to enable the debug mode for the load balancer logics.
+# The debug mode prints more logs to provide more information such as load balance states and decisions.
+# (only used in load balancer extension logics)
+loadBalancerDebugModeEnabled=false
+
+# The target standard deviation of the resource usage across brokers
+# (100% resource usage is 1.0 load).
+# The shedder logic tries to distribute bundle load across brokers to meet this target std.
+# The smaller value will incur load balancing more frequently.
+# (only used in load balancer extension TransferShedder)
+loadBalancerBrokerLoadTargetStd=0.25
+
+# Threshold to the consecutive count of fulfilled shedding(unload) conditions.
+# If the unload scheduler consecutively finds bundles that meet unload conditions
+# many times bigger than this threshold, the scheduler will shed the bundles.
+# The bigger value will incur less bundle unloading/transfers.
+# (only used in load balancer extension TransferShedder)
+loadBalancerSheddingConditionHitCountThreshold=3
+
+# Option to enable the bundle transfer mode when distributing bundle loads.
+# On: transfer bundles from overloaded brokers to underloaded
+# -- pre-assigns the destination broker upon unloading).
+# Off: unload bundles from overloaded brokers
+# -- post-assigns the destination broker upon lookups).
+# (only used in load balancer extension TransferShedder)
+loadBalancerTransferEnabled=true
+
+# Maximum number of brokers to unload bundle load for each unloading cycle.
+# The bigger value will incur more unloading/transfers for each unloading cycle.
+# (only used in load balancer extension TransferShedder)
+loadBalancerMaxNumberOfBrokerSheddingPerCycle=3
+
+# Delay (in seconds) to the next unloading cycle after unloading.
+# The logic tries to give enough time for brokers to recompute load after unloading.
+# The bigger value will delay the next unloading cycle longer.
+# (only used in load balancer extension TransferShedder)
+loadBalanceSheddingDelayInSeconds=180
+
+# Broker load data time to live (TTL in seconds).
+# The logic tries to avoid (possibly unavailable) brokers with out-dated load data,
+# and those brokers will be ignored in the load computation.
+# When tuning this value, please consider loadBalancerReportUpdateMaxIntervalMinutes.
+# The current default is loadBalancerReportUpdateMaxIntervalMinutes * 2.
+# (only used in load balancer extension TransferShedder)
+loadBalancerBrokerLoadDataTTLInSeconds=1800
+
+# Max number of bundles in bundle load report from each broker.
+# The load balancer distributes bundles across brokers,
+# based on topK bundle load data and other broker load data.
+# The bigger value will increase the overhead of reporting many bundles in load data.
+# (only used in load balancer extension logics)
+loadBalancerMaxNumberOfBundlesInBundleLoadReport=10
+
+# Service units'(bundles) split interval. Broker periodically checks whether
+# some service units(e.g. bundles) should split if they become hot-spots.
+# (only used in load balancer extension logics)
+loadBalancerSplitIntervalMinutes=1
+
+# Max number of bundles to split to per cycle.
+# (only used in load balancer extension logics)
+loadBalancerMaxNumberOfBundlesToSplitPerCycle=10
+
+# Threshold to the consecutive count of fulfilled split conditions.
+# If the split scheduler consecutively finds bundles that meet split conditions
+# many times bigger than this threshold, the scheduler will trigger splits on the bundles
+# (if the number of bundles is less than loadBalancerNamespaceMaximumBundles).
+# (only used in load balancer extension logics)
+loadBalancerNamespaceBundleSplitConditionHitCountThreshold=3
+
+# After this delay, the service-unit state channel tombstones any service units (e.g., bundles)
+# in semi-terminal states. For example, after splits, parent bundles will be `deleted`,
+# and then after this delay, the parent bundles' state will be `tombstoned`
+# in the service-unit state channel.
+# Pulsar does not immediately remove such semi-terminal states
+# to avoid unnecessary system confusion,
+# as the bundles in the `tombstoned` state might temporarily look available to reassign.
+# Rarely, one could lower this delay in order to aggressively clean
+# the service-unit state channel when there are a large number of bundles.
+# minimum value = 30 secs
+# (only used in load balancer extension logics)
+loadBalancerServiceUnitStateTombstoneDelayTimeInSeconds=3600
+
+
### --- Replication --- ###
# Enable replication metrics
@@ -1666,6 +1762,11 @@ strictBookieAffinityEnabled=false
# These settings are left here for compatibility
+# The heap memory usage weight when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+# Deprecated: Memory is no longer used as a load balancing item
+loadBalancerMemoryResourceWeight=1.0
+
# Zookeeper quorum connection string
# Deprecated: use metadataStoreUrl instead
zookeeperServers=
diff --git a/conf/functions_log4j2.xml b/conf/functions_log4j2.xml
index 190d9be92940b..6902a3acd8736 100644
--- a/conf/functions_log4j2.xml
+++ b/conf/functions_log4j2.xml
@@ -120,11 +120,11 @@
- info
+ ${sys:pulsar.log.level}${sys:pulsar.log.appender}${sys:pulsar.log.level}
-
\ No newline at end of file
+
diff --git a/conf/functions_worker.yml b/conf/functions_worker.yml
index b41ac8f37a44f..4c5b6aab1b7f4 100644
--- a/conf/functions_worker.yml
+++ b/conf/functions_worker.yml
@@ -311,6 +311,8 @@ authenticationProviders:
authorizationProvider: org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Set of role names that are treated as "super-user", meaning they will be able to access any admin-api
superUserRoles:
+# Set of role names that are treated as "proxy" roles. These are the roles that can supply the originalPrincipal.
+proxyRoles:
#### tls configuration for worker service
# Enable TLS
@@ -405,6 +407,12 @@ validateConnectorConfig: false
# If it is set to true, you must ensure that it has been initialized by "bin/pulsar initialize-cluster-metadata" command.
initializedDlogMetadata: false
+# Whether to ignore unknown properties when deserializing the connector configuration.
+# After upgrading a connector to a new version with a new configuration, the new configuration may not be compatible with the old connector.
+# In case of rollback, it's required to also rollback the connector configuration.
+# Ignoring unknown fields makes it possible to keep the new configuration and only roll back the connector.
+ignoreUnknownConfigFields: false
+
###########################
# Arbitrary Configuration
###########################
diff --git a/conf/log4j2.yaml b/conf/log4j2.yaml
index fff2e063ebd0c..9c261a6b89a50 100644
--- a/conf/log4j2.yaml
+++ b/conf/log4j2.yaml
@@ -78,7 +78,7 @@ Configuration:
basePath: ${sys:pulsar.log.dir}
maxDepth: 2
IfFileName:
- glob: "*/${sys:pulsar.log.file}*log.gz"
+ glob: "${sys:pulsar.log.file}*log.gz"
IfLastModified:
age: 30d
@@ -120,7 +120,7 @@ Configuration:
basePath: ${sys:pulsar.log.dir}
maxDepth: 2
IfFileName:
- glob: "*/${sys:pulsar.log.file}*log.gz"
+ glob: "${sys:pulsar.log.file}*log.gz"
IfLastModified:
age: 30d
- ref: "${sys:pulsar.routing.appender.default}"
diff --git a/conf/proxy.conf b/conf/proxy.conf
index a5110ae57471a..cfc1e47b7c445 100644
--- a/conf/proxy.conf
+++ b/conf/proxy.conf
@@ -58,6 +58,7 @@ bindAddress=0.0.0.0
advertisedAddress=
# Enable or disable the HAProxy protocol.
+# If true, the real IP addresses of consumers and producers can be obtained when getting topic statistics data.
haProxyProtocolEnabled=false
# Enables zero-copy transport of data across network interfaces using the splice system call.
@@ -277,6 +278,11 @@ maxHttpServerConnections=2048
# Max concurrent web requests
maxConcurrentHttpRequests=1024
+# The maximum size in bytes of the request header. Larger headers will allow for more and/or larger cookies plus larger
+# form content encoded in a URL. However, larger headers consume more memory and can make a server more vulnerable to
+# denial of service attacks.
+httpMaxRequestHeaderSize = 8192
+
## Configure the datasource of basic authenticate, supports the file and Base64 format.
# file:
# basicAuthConf=/path/my/.htpasswd
diff --git a/conf/pulsar_tools_env.sh b/conf/pulsar_tools_env.sh
index a356dbb9a28df..9d22b73905df3 100755
--- a/conf/pulsar_tools_env.sh
+++ b/conf/pulsar_tools_env.sh
@@ -42,6 +42,19 @@
# PULSAR_GLOBAL_ZK_CONF=
# Extra options to be passed to the jvm
+# Discard parameter "-Xms" of $PULSAR_MEM, which tends to be the Broker's minimum memory, to avoid using too much
+# memory by tools.
+if [ -n "$PULSAR_MEM" ]; then
+ PULSAR_MEM_ARR=("${PULSAR_MEM}")
+ PULSAR_MEM_REWRITE=""
+ for i in ${PULSAR_MEM_ARR}
+ do
+ if [ "${i:0:4}" != "-Xms" ]; then
+ PULSAR_MEM_REWRITE="$PULSAR_MEM_REWRITE $i";
+ fi
+ done
+ PULSAR_MEM=${PULSAR_MEM_REWRITE}
+fi
PULSAR_MEM=${PULSAR_MEM:-"-Xmx128m -XX:MaxDirectMemorySize=128m"}
# Garbage collection options
diff --git a/conf/standalone.conf b/conf/standalone.conf
index ed883406883ed..46e6aed76e42a 100644
--- a/conf/standalone.conf
+++ b/conf/standalone.conf
@@ -48,6 +48,7 @@ bindAddresses=
advertisedAddress=
# Enable or disable the HAProxy protocol.
+# If true, the real IP addresses of consumers and producers can be obtained when getting topic statistics data.
haProxyProtocolEnabled=false
# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors()
@@ -696,8 +697,8 @@ bookkeeperUseV2WireProtocol=true
# bookkeeperClientExposeStatsToPrometheus=false
# If bookkeeperClientExposeStatsToPrometheus is set to true, we can set bookkeeperClientLimitStatsLogging=true
-# to limit per_channel_bookie_client metrics. default is false
-# bookkeeperClientLimitStatsLogging=false
+# to limit per_channel_bookie_client metrics. default is true
+# bookkeeperClientLimitStatsLogging=true
### --- Managed Ledger --- ###
@@ -1223,3 +1224,44 @@ configurationStoreServers=
# zookeeper.
# Deprecated: use managedLedgerMaxUnackedRangesToPersistInMetadataStore
managedLedgerMaxUnackedRangesToPersistInZooKeeper=-1
+
+# Whether to enable the delayed delivery for messages.
+# If disabled, messages will be immediately delivered and there will
+# be no tracking overhead.
+delayedDeliveryEnabled=true
+
+# Class name of the factory that implements the delayed delivery tracker.
+# If value is "org.apache.pulsar.broker.delayed.BucketDelayedDeliveryTrackerFactory",
+# will create bucket based delayed message index tracker.
+delayedDeliveryTrackerFactoryClassName=org.apache.pulsar.broker.delayed.InMemoryDelayedDeliveryTrackerFactory
+
+# Control the tick time for when retrying on delayed delivery,
+# affecting the accuracy of the delivery time compared to the scheduled time.
+# Note that this time is used to configure the HashedWheelTimer's tick time.
+# Default is 1 second.
+delayedDeliveryTickTimeMillis=1000
+
+# Whether the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
+# time by as much as the tickTimeMillis. This can reduce the overhead on the broker of maintaining the delayed index
+# for a potentially very short time period. When true, messages will not be sent to consumer until the deliverAt time
+# has passed, and they may be as late as the deliverAt time plus the tickTimeMillis for the topic plus the
+# delayedDeliveryTickTimeMillis.
+isDelayedDeliveryDeliverAtTimeStrict=false
+
+# The delayed message index bucket min index count.
+# When the index count of the current bucket is more than this value and all message indexes of current ledger
+# have already been added to the tracker we will seal the bucket.
+delayedDeliveryMinIndexCountPerBucket=50000
+
+# The delayed message index bucket time step(in seconds) in per bucket snapshot segment,
+# after reaching the max time step limitation, the snapshot segment will be cut off.
+delayedDeliveryMaxTimeStepPerBucketSnapshotSegmentSeconds=300
+
+# The max number of delayed message indexes per bucket snapshot segment, -1 means no limitation
+# after reaching the max number limitation, the snapshot segment will be cut off.
+delayedDeliveryMaxIndexesPerBucketSnapshotSegment=5000
+
+# The max number of delayed message index bucket,
+# after reaching the max buckets limitation, the adjacent buckets will be merged.
+# (disable with value -1)
+delayedDeliveryMaxNumBuckets=-1
diff --git a/conf/websocket.conf b/conf/websocket.conf
index a966f05c71935..2e2824a838c6f 100644
--- a/conf/websocket.conf
+++ b/conf/websocket.conf
@@ -108,6 +108,9 @@ brokerClientAuthenticationPlugin=
brokerClientAuthenticationParameters=
brokerClientTrustCertsFilePath=
+# Whether the hostname is validated when connecting to the broker.
+tlsHostnameVerificationEnabled=false
+
# You can add extra configuration options for the Pulsar Client
# by prefixing them with "brokerClient_". These configurations are applied after hard coded configuration
# and before the above brokerClient configurations named above.
diff --git a/deployment/terraform-ansible/templates/broker.conf b/deployment/terraform-ansible/templates/broker.conf
index f42d4c807d5d9..37e512fb35cc6 100644
--- a/deployment/terraform-ansible/templates/broker.conf
+++ b/deployment/terraform-ansible/templates/broker.conf
@@ -745,8 +745,8 @@ bookkeeperExplicitLacIntervalInMills=0
# bookkeeperClientExposeStatsToPrometheus=false
# If bookkeeperClientExposeStatsToPrometheus is set to true, we can set bookkeeperClientLimitStatsLogging=true
-# to limit per_channel_bookie_client metrics. default is false
-# bookkeeperClientLimitStatsLogging=false
+# to limit per_channel_bookie_client metrics. default is true
+# bookkeeperClientLimitStatsLogging=true
### --- Managed Ledger --- ###
diff --git a/distribution/io/pom.xml b/distribution/io/pom.xml
index cb8591f0d066a..568d76922bf4e 100644
--- a/distribution/io/pom.xml
+++ b/distribution/io/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/offloaders/pom.xml b/distribution/offloaders/pom.xml
index 94395208d4969..d23ebec2ef26d 100644
--- a/distribution/offloaders/pom.xml
+++ b/distribution/offloaders/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/pom.xml b/distribution/pom.xml
index da1e002dddd3c..36a3fa1c5835a 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/server/pom.xml b/distribution/server/pom.xml
index 7bf8c8b32e3a0..f804c9c54b9cd 100644
--- a/distribution/server/pom.xml
+++ b/distribution/server/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
@@ -46,6 +46,12 @@
${project.version}
+
+ ${project.groupId}
+ pulsar-broker-auth-oidc
+ ${project.version}
+
+
${project.groupId}pulsar-broker-auth-sasl
diff --git a/distribution/server/src/assemble/LICENSE.bin.txt b/distribution/server/src/assemble/LICENSE.bin.txt
index dd9ff0966f6ee..d7033ac85646e 100644
--- a/distribution/server/src/assemble/LICENSE.bin.txt
+++ b/distribution/server/src/assemble/LICENSE.bin.txt
@@ -246,26 +246,26 @@ The Apache Software License, Version 2.0
* JCommander -- com.beust-jcommander-1.82.jar
* High Performance Primitive Collections for Java -- com.carrotsearch-hppc-0.9.1.jar
* Jackson
- - com.fasterxml.jackson.core-jackson-annotations-2.13.4.jar
- - com.fasterxml.jackson.core-jackson-core-2.13.4.jar
- - com.fasterxml.jackson.core-jackson-databind-2.13.4.2.jar
- - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.13.4.jar
- - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.13.4.jar
- - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.13.4.jar
- - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.13.4.jar
- - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.13.4.jar
- - com.fasterxml.jackson.datatype-jackson-datatype-jdk8-2.13.4.jar
- - com.fasterxml.jackson.datatype-jackson-datatype-jsr310-2.13.4.jar
- - com.fasterxml.jackson.module-jackson-module-parameter-names-2.13.4.jar
+ - com.fasterxml.jackson.core-jackson-annotations-2.14.2.jar
+ - com.fasterxml.jackson.core-jackson-core-2.14.2.jar
+ - com.fasterxml.jackson.core-jackson-databind-2.14.2.jar
+ - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.14.2.jar
+ - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.14.2.jar
+ - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.14.2.jar
+ - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.14.2.jar
+ - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.14.2.jar
+ - com.fasterxml.jackson.datatype-jackson-datatype-jdk8-2.14.2.jar
+ - com.fasterxml.jackson.datatype-jackson-datatype-jsr310-2.14.2.jar
+ - com.fasterxml.jackson.module-jackson-module-parameter-names-2.14.2.jar
* Caffeine -- com.github.ben-manes.caffeine-caffeine-2.9.1.jar
* Conscrypt -- org.conscrypt-conscrypt-openjdk-uber-2.5.2.jar
* Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.0.1.jar
- * Bitbucket -- org.bitbucket.b_c-jose4j-0.7.6.jar
+ * Bitbucket -- org.bitbucket.b_c-jose4j-0.9.3.jar
* Gson
- com.google.code.gson-gson-2.8.9.jar
- io.gsonfire-gson-fire-1.8.5.jar
* Guava
- - com.google.guava-guava-31.0.1-jre.jar
+ - com.google.guava-guava-32.0.0-jre.jar
- com.google.guava-failureaccess-1.0.1.jar
- com.google.guava-listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar
* J2ObjC Annotations -- com.google.j2objc-j2objc-annotations-1.3.jar
@@ -289,37 +289,37 @@ The Apache Software License, Version 2.0
- org.apache.commons-commons-lang3-3.11.jar
- org.apache.commons-commons-text-1.10.0.jar
* Netty
- - io.netty-netty-buffer-4.1.86.Final.jar
- - io.netty-netty-codec-4.1.86.Final.jar
- - io.netty-netty-codec-dns-4.1.86.Final.jar
- - io.netty-netty-codec-http-4.1.86.Final.jar
- - io.netty-netty-codec-http2-4.1.86.Final.jar
- - io.netty-netty-codec-socks-4.1.86.Final.jar
- - io.netty-netty-codec-haproxy-4.1.86.Final.jar
- - io.netty-netty-common-4.1.86.Final.jar
- - io.netty-netty-handler-4.1.86.Final.jar
- - io.netty-netty-handler-proxy-4.1.86.Final.jar
- - io.netty-netty-resolver-4.1.86.Final.jar
- - io.netty-netty-resolver-dns-4.1.86.Final.jar
- - io.netty-netty-resolver-dns-classes-macos-4.1.86.Final.jar
- - io.netty-netty-resolver-dns-native-macos-4.1.86.Final-osx-aarch_64.jar
- - io.netty-netty-resolver-dns-native-macos-4.1.86.Final-osx-x86_64.jar
- - io.netty-netty-transport-4.1.86.Final.jar
- - io.netty-netty-transport-classes-epoll-4.1.86.Final.jar
- - io.netty-netty-transport-native-epoll-4.1.86.Final-linux-x86_64.jar
- - io.netty-netty-transport-native-epoll-4.1.86.Final.jar
- - io.netty-netty-transport-native-unix-common-4.1.86.Final.jar
- - io.netty-netty-transport-native-unix-common-4.1.86.Final-linux-x86_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.54.Final.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.54.Final-linux-aarch_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.54.Final-linux-x86_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.54.Final-osx-aarch_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.54.Final-osx-x86_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.54.Final-windows-x86_64.jar
- - io.netty-netty-tcnative-classes-2.0.54.Final.jar
- - io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.16.Final.jar
- - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.16.Final-linux-x86_64.jar
- - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.16.Final-linux-aarch_64.jar
+ - io.netty-netty-buffer-4.1.93.Final.jar
+ - io.netty-netty-codec-4.1.93.Final.jar
+ - io.netty-netty-codec-dns-4.1.93.Final.jar
+ - io.netty-netty-codec-http-4.1.93.Final.jar
+ - io.netty-netty-codec-http2-4.1.93.Final.jar
+ - io.netty-netty-codec-socks-4.1.93.Final.jar
+ - io.netty-netty-codec-haproxy-4.1.93.Final.jar
+ - io.netty-netty-common-4.1.93.Final.jar
+ - io.netty-netty-handler-4.1.93.Final.jar
+ - io.netty-netty-handler-proxy-4.1.93.Final.jar
+ - io.netty-netty-resolver-4.1.93.Final.jar
+ - io.netty-netty-resolver-dns-4.1.93.Final.jar
+ - io.netty-netty-resolver-dns-classes-macos-4.1.93.Final.jar
+ - io.netty-netty-resolver-dns-native-macos-4.1.93.Final-osx-aarch_64.jar
+ - io.netty-netty-resolver-dns-native-macos-4.1.93.Final-osx-x86_64.jar
+ - io.netty-netty-transport-4.1.93.Final.jar
+ - io.netty-netty-transport-classes-epoll-4.1.93.Final.jar
+ - io.netty-netty-transport-native-epoll-4.1.93.Final-linux-x86_64.jar
+ - io.netty-netty-transport-native-epoll-4.1.93.Final.jar
+ - io.netty-netty-transport-native-unix-common-4.1.93.Final.jar
+ - io.netty-netty-transport-native-unix-common-4.1.93.Final-linux-x86_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar
+ - io.netty-netty-tcnative-classes-2.0.61.Final.jar
+ - io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.21.Final.jar
+ - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar
+ - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar
* Prometheus client
- io.prometheus.jmx-collector-0.16.1.jar
- io.prometheus-simpleclient-0.16.0.jar
@@ -343,35 +343,37 @@ The Apache Software License, Version 2.0
- org.apache.logging.log4j-log4j-slf4j-impl-2.18.0.jar
- org.apache.logging.log4j-log4j-web-2.18.0.jar
* Java Native Access JNA
- - net.java.dev.jna-jna-5.12.1.jar
- net.java.dev.jna-jna-jpms-5.12.1.jar
- net.java.dev.jna-jna-platform-jpms-5.12.1.jar
* BookKeeper
- - org.apache.bookkeeper-bookkeeper-common-4.15.3.jar
- - org.apache.bookkeeper-bookkeeper-common-allocator-4.15.3.jar
- - org.apache.bookkeeper-bookkeeper-proto-4.15.3.jar
- - org.apache.bookkeeper-bookkeeper-server-4.15.3.jar
- - org.apache.bookkeeper-bookkeeper-tools-framework-4.15.3.jar
- - org.apache.bookkeeper-circe-checksum-4.15.3.jar
- - org.apache.bookkeeper-cpu-affinity-4.15.3.jar
- - org.apache.bookkeeper-statelib-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-api-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-common-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-java-client-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-java-client-base-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-proto-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-server-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-service-api-4.15.3.jar
- - org.apache.bookkeeper-stream-storage-service-impl-4.15.3.jar
- - org.apache.bookkeeper.http-http-server-4.15.3.jar
- - org.apache.bookkeeper.http-vertx-http-server-4.15.3.jar
- - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.15.3.jar
- - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.15.3.jar
- - org.apache.distributedlog-distributedlog-common-4.15.3.jar
- - org.apache.distributedlog-distributedlog-core-4.15.3-tests.jar
- - org.apache.distributedlog-distributedlog-core-4.15.3.jar
- - org.apache.distributedlog-distributedlog-protocol-4.15.3.jar
- - org.apache.bookkeeper.stats-codahale-metrics-provider-4.15.3.jar
+ - org.apache.bookkeeper-bookkeeper-common-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-common-allocator-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-proto-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-server-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-tools-framework-4.16.1.jar
+ - org.apache.bookkeeper-circe-checksum-4.16.1.jar
+ - org.apache.bookkeeper-cpu-affinity-4.16.1.jar
+ - org.apache.bookkeeper-statelib-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-api-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-common-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-java-client-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-java-client-base-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-proto-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-server-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-service-api-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-service-impl-4.16.1.jar
+ - org.apache.bookkeeper.http-http-server-4.16.1.jar
+ - org.apache.bookkeeper.http-vertx-http-server-4.16.1.jar
+ - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.16.1.jar
+ - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.16.1.jar
+ - org.apache.distributedlog-distributedlog-common-4.16.1.jar
+ - org.apache.distributedlog-distributedlog-core-4.16.1-tests.jar
+ - org.apache.distributedlog-distributedlog-core-4.16.1.jar
+ - org.apache.distributedlog-distributedlog-protocol-4.16.1.jar
+ - org.apache.bookkeeper.stats-codahale-metrics-provider-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-slogger-api-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-slogger-slf4j-4.16.1.jar
+ - org.apache.bookkeeper-native-io-4.16.1.jar
* Apache HTTP Client
- org.apache.httpcomponents-httpclient-4.5.13.jar
- org.apache.httpcomponents-httpcore-4.4.15.jar
@@ -381,27 +383,27 @@ The Apache Software License, Version 2.0
- org.asynchttpclient-async-http-client-2.12.1.jar
- org.asynchttpclient-async-http-client-netty-utils-2.12.1.jar
* Jetty
- - org.eclipse.jetty-jetty-client-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-continuation-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-http-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-io-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-proxy-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-security-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-server-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-servlet-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-servlets-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-util-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-util-ajax-9.4.48.v20220622.jar
- - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.48.v20220622.jar
- - org.eclipse.jetty.websocket-websocket-api-9.4.48.v20220622.jar
- - org.eclipse.jetty.websocket-websocket-client-9.4.48.v20220622.jar
- - org.eclipse.jetty.websocket-websocket-common-9.4.48.v20220622.jar
- - org.eclipse.jetty.websocket-websocket-server-9.4.48.v20220622.jar
- - org.eclipse.jetty.websocket-websocket-servlet-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.48.v20220622.jar
- - org.eclipse.jetty-jetty-alpn-server-9.4.48.v20220622.jar
- * SnakeYaml -- org.yaml-snakeyaml-1.32.jar
- * RocksDB - org.rocksdb-rocksdbjni-6.29.4.1.jar
+ - org.eclipse.jetty-jetty-client-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-continuation-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-http-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-io-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-proxy-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-security-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-server-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-servlet-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-servlets-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-util-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-util-ajax-9.4.51.v20230217.jar
+ - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.51.v20230217.jar
+ - org.eclipse.jetty.websocket-websocket-api-9.4.51.v20230217.jar
+ - org.eclipse.jetty.websocket-websocket-client-9.4.51.v20230217.jar
+ - org.eclipse.jetty.websocket-websocket-common-9.4.51.v20230217.jar
+ - org.eclipse.jetty.websocket-websocket-server-9.4.51.v20230217.jar
+ - org.eclipse.jetty.websocket-websocket-servlet-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.51.v20230217.jar
+ - org.eclipse.jetty-jetty-alpn-server-9.4.51.v20230217.jar
+ * SnakeYaml -- org.yaml-snakeyaml-2.0.jar
+ * RocksDB - org.rocksdb-rocksdbjni-7.9.2.jar
* Google Error Prone Annotations - com.google.errorprone-error_prone_annotations-2.5.1.jar
* Apache Thrift - org.apache.thrift-libthrift-0.14.2.jar
* OkHttp3
@@ -410,10 +412,10 @@ The Apache Software License, Version 2.0
* Okio - com.squareup.okio-okio-2.8.0.jar
* Javassist -- org.javassist-javassist-3.25.0-GA.jar
* Kotlin Standard Lib
- - org.jetbrains.kotlin-kotlin-stdlib-1.4.32.jar
- - org.jetbrains.kotlin-kotlin-stdlib-common-1.4.32.jar
- - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.4.32.jar
- - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.4.32.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-1.8.20.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-common-1.8.20.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.8.20.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.8.20.jar
- org.jetbrains-annotations-13.0.jar
* gRPC
- io.grpc-grpc-all-1.45.1.jar
@@ -431,7 +433,6 @@ The Apache Software License, Version 2.0
- io.grpc-grpc-services-1.45.1.jar
- io.grpc-grpc-xds-1.45.1.jar
- io.grpc-grpc-rls-1.45.1.jar
- - com.google.auto.service-auto-service-annotations-1.0.jar
* Perfmark
- io.perfmark-perfmark-api-0.19.0.jar
* OpenCensus
@@ -451,9 +452,9 @@ The Apache Software License, Version 2.0
* Apache Yetus
- org.apache.yetus-audience-annotations-0.12.0.jar
* Kubernetes Client
- - io.kubernetes-client-java-12.0.1.jar
- - io.kubernetes-client-java-api-12.0.1.jar
- - io.kubernetes-client-java-proto-12.0.1.jar
+ - io.kubernetes-client-java-18.0.0.jar
+ - io.kubernetes-client-java-api-18.0.0.jar
+ - io.kubernetes-client-java-proto-18.0.0.jar
* Dropwizard
- io.dropwizard.metrics-metrics-core-4.1.12.1.jar
- io.dropwizard.metrics-metrics-graphite-4.1.12.1.jar
@@ -468,15 +469,16 @@ The Apache Software License, Version 2.0
* JCTools - Java Concurrency Tools for the JVM
- org.jctools-jctools-core-2.1.2.jar
* Vertx
- - io.vertx-vertx-auth-common-3.9.8.jar
- - io.vertx-vertx-bridge-common-3.9.8.jar
- - io.vertx-vertx-core-3.9.8.jar
- - io.vertx-vertx-web-3.9.8.jar
- - io.vertx-vertx-web-common-3.9.8.jar
+ - io.vertx-vertx-auth-common-4.3.8.jar
+ - io.vertx-vertx-bridge-common-4.3.8.jar
+ - io.vertx-vertx-core-4.3.8.jar
+ - io.vertx-vertx-web-4.3.8.jar
+ - io.vertx-vertx-web-common-4.3.8.jar
+ - io.vertx-vertx-grpc-4.3.5.jar
* Apache ZooKeeper
- - org.apache.zookeeper-zookeeper-3.8.0.jar
- - org.apache.zookeeper-zookeeper-jute-3.8.0.jar
- - org.apache.zookeeper-zookeeper-prometheus-metrics-3.8.0.jar
+ - org.apache.zookeeper-zookeeper-3.8.1.jar
+ - org.apache.zookeeper-zookeeper-jute-3.8.1.jar
+ - org.apache.zookeeper-zookeeper-prometheus-metrics-3.8.1.jar
* Snappy Java
- org.xerial.snappy-snappy-java-1.1.8.4.jar
* Google HTTP Client
@@ -485,8 +487,10 @@ The Apache Software License, Version 2.0
- com.google.auto.value-auto-value-annotations-1.9.jar
- com.google.re2j-re2j-1.5.jar
* Jetcd
- - io.etcd-jetcd-common-0.5.11.jar
- - io.etcd-jetcd-core-0.5.11.jar
+ - io.etcd-jetcd-api-0.7.5.jar
+ - io.etcd-jetcd-common-0.7.5.jar
+ - io.etcd-jetcd-core-0.7.5.jar
+ - io.etcd-jetcd-grpc-0.7.5.jar
* IPAddress
- com.github.seancfoley-ipaddress-5.3.3.jar
* RxJava
@@ -494,7 +498,7 @@ The Apache Software License, Version 2.0
* RabbitMQ Java Client
- com.rabbitmq-amqp-client-5.5.3.jar
* RoaringBitmap
- - org.roaringbitmap-RoaringBitmap-0.9.15.jar
+ - org.roaringbitmap-RoaringBitmap-0.9.44.jar
BSD 3-clause "New" or "Revised" License
* Google auth library
@@ -514,9 +518,12 @@ MIT License
- org.slf4j-slf4j-api-1.7.32.jar
- org.slf4j-jcl-over-slf4j-1.7.32.jar
* The Checker Framework
- - org.checkerframework-checker-qual-3.12.0.jar
+ - org.checkerframework-checker-qual-3.33.0.jar
* oshi
- com.github.oshi-oshi-core-java11-6.4.0.jar
+ * Auth0, Inc.
+ - com.auth0-java-jwt-4.3.0.jar
+ - com.auth0-jwks-rsa-0.22.0.jar
Protocol Buffers License
* Protocol Buffers
- com.google.protobuf-protobuf-java-3.19.6.jar -- ../licenses/LICENSE-protobuf.txt
diff --git a/distribution/shell/pom.xml b/distribution/shell/pom.xml
index 5c6c332120c96..9e3134a75e5bf 100644
--- a/distribution/shell/pom.xml
+++ b/distribution/shell/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/shell/src/assemble/LICENSE.bin.txt b/distribution/shell/src/assemble/LICENSE.bin.txt
index 4e1d07034ff92..514bb41a9499c 100644
--- a/distribution/shell/src/assemble/LICENSE.bin.txt
+++ b/distribution/shell/src/assemble/LICENSE.bin.txt
@@ -311,22 +311,22 @@ This projects includes binary packages with the following licenses:
The Apache Software License, Version 2.0
* JCommander -- jcommander-1.82.jar
* Jackson
- - jackson-annotations-2.13.4.jar
- - jackson-core-2.13.4.jar
- - jackson-databind-2.13.4.2.jar
- - jackson-dataformat-yaml-2.13.4.jar
- - jackson-jaxrs-base-2.13.4.jar
- - jackson-jaxrs-json-provider-2.13.4.jar
- - jackson-module-jaxb-annotations-2.13.4.jar
- - jackson-module-jsonSchema-2.13.4.jar
- - jackson-datatype-jdk8-2.13.4.jar
- - jackson-datatype-jsr310-2.13.4.jar
- - jackson-module-parameter-names-2.13.4.jar
+ - jackson-annotations-2.14.2.jar
+ - jackson-core-2.14.2.jar
+ - jackson-databind-2.14.2.jar
+ - jackson-dataformat-yaml-2.14.2.jar
+ - jackson-jaxrs-base-2.14.2.jar
+ - jackson-jaxrs-json-provider-2.14.2.jar
+ - jackson-module-jaxb-annotations-2.14.2.jar
+ - jackson-module-jsonSchema-2.14.2.jar
+ - jackson-datatype-jdk8-2.14.2.jar
+ - jackson-datatype-jsr310-2.14.2.jar
+ - jackson-module-parameter-names-2.14.2.jar
* Conscrypt -- conscrypt-openjdk-uber-2.5.2.jar
* Gson
- gson-2.8.9.jar
* Guava
- - guava-31.0.1-jre.jar
+ - guava-32.0.0-jre.jar
- failureaccess-1.0.1.jar
- listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar
* J2ObjC Annotations -- j2objc-annotations-1.3.jar
@@ -348,35 +348,35 @@ The Apache Software License, Version 2.0
- commons-text-1.10.0.jar
- commons-compress-1.21.jar
* Netty
- - netty-buffer-4.1.86.Final.jar
- - netty-codec-4.1.86.Final.jar
- - netty-codec-dns-4.1.86.Final.jar
- - netty-codec-http-4.1.86.Final.jar
- - netty-codec-socks-4.1.86.Final.jar
- - netty-codec-haproxy-4.1.86.Final.jar
- - netty-common-4.1.86.Final.jar
- - netty-handler-4.1.86.Final.jar
- - netty-handler-proxy-4.1.86.Final.jar
- - netty-resolver-4.1.86.Final.jar
- - netty-resolver-dns-4.1.86.Final.jar
- - netty-transport-4.1.86.Final.jar
- - netty-transport-classes-epoll-4.1.86.Final.jar
- - netty-transport-native-epoll-4.1.86.Final-linux-x86_64.jar
- - netty-transport-native-unix-common-4.1.86.Final.jar
- - netty-transport-native-unix-common-4.1.86.Final-linux-x86_64.jar
- - netty-tcnative-boringssl-static-2.0.54.Final.jar
- - netty-tcnative-boringssl-static-2.0.54.Final-linux-aarch_64.jar
- - netty-tcnative-boringssl-static-2.0.54.Final-linux-x86_64.jar
- - netty-tcnative-boringssl-static-2.0.54.Final-osx-aarch_64.jar
- - netty-tcnative-boringssl-static-2.0.54.Final-osx-x86_64.jar
- - netty-tcnative-boringssl-static-2.0.54.Final-windows-x86_64.jar
- - netty-tcnative-classes-2.0.54.Final.jar
- - netty-incubator-transport-classes-io_uring-0.0.16.Final.jar
- - netty-incubator-transport-native-io_uring-0.0.16.Final-linux-aarch_64.jar
- - netty-incubator-transport-native-io_uring-0.0.16.Final-linux-x86_64.jar
- - netty-resolver-dns-classes-macos-4.1.86.Final.jar
- - netty-resolver-dns-native-macos-4.1.86.Final-osx-aarch_64.jar
- - netty-resolver-dns-native-macos-4.1.86.Final-osx-x86_64.jar
+ - netty-buffer-4.1.93.Final.jar
+ - netty-codec-4.1.93.Final.jar
+ - netty-codec-dns-4.1.93.Final.jar
+ - netty-codec-http-4.1.93.Final.jar
+ - netty-codec-socks-4.1.93.Final.jar
+ - netty-codec-haproxy-4.1.93.Final.jar
+ - netty-common-4.1.93.Final.jar
+ - netty-handler-4.1.93.Final.jar
+ - netty-handler-proxy-4.1.93.Final.jar
+ - netty-resolver-4.1.93.Final.jar
+ - netty-resolver-dns-4.1.93.Final.jar
+ - netty-transport-4.1.93.Final.jar
+ - netty-transport-classes-epoll-4.1.93.Final.jar
+ - netty-transport-native-epoll-4.1.93.Final-linux-x86_64.jar
+ - netty-transport-native-unix-common-4.1.93.Final.jar
+ - netty-transport-native-unix-common-4.1.93.Final-linux-x86_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar
+ - netty-tcnative-classes-2.0.61.Final.jar
+ - netty-incubator-transport-classes-io_uring-0.0.21.Final.jar
+ - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar
+ - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar
+ - netty-resolver-dns-classes-macos-4.1.93.Final.jar
+ - netty-resolver-dns-native-macos-4.1.93.Final-osx-aarch_64.jar
+ - netty-resolver-dns-native-macos-4.1.93.Final-osx-x86_64.jar
* Prometheus client
- simpleclient-0.16.0.jar
- simpleclient_log4j2-0.16.0.jar
@@ -390,24 +390,24 @@ The Apache Software License, Version 2.0
- log4j-web-2.18.0.jar
* BookKeeper
- - bookkeeper-common-allocator-4.15.3.jar
- - cpu-affinity-4.15.3.jar
- - circe-checksum-4.15.3.jar
+ - bookkeeper-common-allocator-4.16.1.jar
+ - cpu-affinity-4.16.1.jar
+ - circe-checksum-4.16.1.jar
* AirCompressor
- aircompressor-0.20.jar
* AsyncHttpClient
- async-http-client-2.12.1.jar
- async-http-client-netty-utils-2.12.1.jar
* Jetty
- - jetty-client-9.4.48.v20220622.jar
- - jetty-http-9.4.48.v20220622.jar
- - jetty-io-9.4.48.v20220622.jar
- - jetty-util-9.4.48.v20220622.jar
- - javax-websocket-client-impl-9.4.48.v20220622.jar
- - websocket-api-9.4.48.v20220622.jar
- - websocket-client-9.4.48.v20220622.jar
- - websocket-common-9.4.48.v20220622.jar
- * SnakeYaml -- snakeyaml-1.32.jar
+ - jetty-client-9.4.51.v20230217.jar
+ - jetty-http-9.4.51.v20230217.jar
+ - jetty-io-9.4.51.v20230217.jar
+ - jetty-util-9.4.51.v20230217.jar
+ - javax-websocket-client-impl-9.4.51.v20230217.jar
+ - websocket-api-9.4.51.v20230217.jar
+ - websocket-client-9.4.51.v20230217.jar
+ - websocket-common-9.4.51.v20230217.jar
+ * SnakeYaml -- snakeyaml-2.0.jar
* Google Error Prone Annotations - error_prone_annotations-2.5.1.jar
* Javassist -- javassist-3.25.0-GA.jar
* Apache Avro
@@ -422,7 +422,7 @@ MIT License
* SLF4J -- ../licenses/LICENSE-SLF4J.txt
- slf4j-api-1.7.32.jar
* The Checker Framework
- - checker-qual-3.12.0.jar
+ - checker-qual-3.33.0.jar
Protocol Buffers License
* Protocol Buffers
diff --git a/docker/build.sh b/docker/build.sh
index d8ab4bea882c4..88be44f23e73f 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -18,7 +18,7 @@
# under the License.
#
-ROOT_DIR=$(git rev-parse --show-toplevel)
+ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null 2>&1 && pwd )"
cd $ROOT_DIR/docker
mvn package -Pdocker,-main
diff --git a/docker/get-version.sh b/docker/get-version.sh
index 07145e7cf0c18..0b736baf3b270 100755
--- a/docker/get-version.sh
+++ b/docker/get-version.sh
@@ -18,7 +18,7 @@
# under the License.
#
-ROOT_DIR=$(git rev-parse --show-toplevel)
+ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null 2>&1 && pwd )"
pushd $ROOT_DIR > /dev/null
diff --git a/docker/pom.xml b/docker/pom.xml
index 77c427b05aef2..afe55f0fe57f0 100644
--- a/docker/pom.xml
+++ b/docker/pom.xml
@@ -26,7 +26,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOTdocker-imagesApache Pulsar :: Docker Images
@@ -60,6 +60,20 @@
pulsarpulsar-all
+
+
+
+ pl.project13.maven
+ git-commit-id-plugin
+
+ false
+ true
+ true
+ false
+
+
+
+
diff --git a/docker/publish.sh b/docker/publish.sh
index af0d72d4b3437..651fefc1498e9 100755
--- a/docker/publish.sh
+++ b/docker/publish.sh
@@ -18,7 +18,7 @@
# under the License.
#
-ROOT_DIR=$(git rev-parse --show-toplevel)
+ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null 2>&1 && pwd )"
cd $ROOT_DIR/docker
# We should only publish images that are made from official and approved releases
@@ -49,6 +49,9 @@ fi
MVN_VERSION=`./get-version.sh`
echo "Pulsar version: ${MVN_VERSION}"
+GIT_COMMIT_ID_ABBREV=$(git rev-parse --short=7 HEAD 2>/dev/null || echo no-git)
+GIT_BRANCH=$(git branch --show-current 2>/dev/null || echo no-git)
+IMAGE_TAG="${MVN_VERSION}-${GIT_COMMIT_ID_ABBREV}"
if [[ -z ${DOCKER_REGISTRY} ]]; then
docker_registry_org=${DOCKER_ORG}
@@ -62,16 +65,21 @@ set -x
# Fail if any of the subsequent commands fail
set -e
-docker tag pulsar:latest ${docker_registry_org}/pulsar:latest
-docker tag pulsar-all:latest ${docker_registry_org}/pulsar-all:latest
+if [[ "$GIT_BRANCH" == "master" ]]; then
+ docker tag apachepulsar/pulsar:${IMAGE_TAG} ${docker_registry_org}/pulsar:latest
+ docker tag apachepulsar/pulsar-all:${IMAGE_TAG} ${docker_registry_org}/pulsar-all:latest
+fi
-docker tag pulsar:latest ${docker_registry_org}/pulsar:$MVN_VERSION
-docker tag pulsar-all:latest ${docker_registry_org}/pulsar-all:$MVN_VERSION
+docker tag apachepulsar/pulsar:${IMAGE_TAG} ${docker_registry_org}/pulsar:$MVN_VERSION
+docker tag apachepulsar/pulsar-all:${IMAGE_TAG} ${docker_registry_org}/pulsar-all:$MVN_VERSION
# Push all images and tags
-docker push ${docker_registry_org}/pulsar:latest
-docker push ${docker_registry_org}/pulsar-all:latest
+if [[ "$GIT_BRANCH" == "master" ]]; then
+ docker push ${docker_registry_org}/pulsar:latest
+ docker push ${docker_registry_org}/pulsar-all:latest
+fi
+
docker push ${docker_registry_org}/pulsar:$MVN_VERSION
docker push ${docker_registry_org}/pulsar-all:$MVN_VERSION
-echo "Finished pushing images to ${docker_registry_org}"
+echo "Finished pushing images to ${docker_registry_org}"
\ No newline at end of file
diff --git a/docker/pulsar-all/Dockerfile b/docker/pulsar-all/Dockerfile
index 42431fc94a067..81ad74b65000f 100644
--- a/docker/pulsar-all/Dockerfile
+++ b/docker/pulsar-all/Dockerfile
@@ -17,6 +17,7 @@
# under the License.
#
+ARG PULSAR_IMAGE
FROM busybox as pulsar-all
ARG PULSAR_IO_DIR
@@ -26,6 +27,6 @@ ADD ${PULSAR_IO_DIR} /connectors
ADD ${PULSAR_OFFLOADER_TARBALL} /
RUN mv /apache-pulsar-offloaders-*/offloaders /offloaders
-FROM apachepulsar/pulsar:latest
+FROM $PULSAR_IMAGE
COPY --from=pulsar-all /connectors /pulsar/connectors
COPY --from=pulsar-all /offloaders /pulsar/offloaders
diff --git a/docker/pulsar-all/pom.xml b/docker/pulsar-all/pom.xml
index fba5b1df5070c..e616ac132d319 100644
--- a/docker/pulsar-all/pom.xml
+++ b/docker/pulsar-all/pom.xml
@@ -23,7 +23,7 @@
org.apache.pulsardocker-images
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT4.0.0pulsar-all-docker-image
@@ -67,11 +67,18 @@
- docker
+ git-commit-id-no-git
+
+
+ ${basedir}/../../.git/index
+
+
- target/apache-pulsar-io-connectors-${project.version}-bin
- target/pulsar-offloader-distribution-${project.version}-bin.tar.gz
+ no-git
+
+
+ docker
@@ -143,17 +150,26 @@
- pulsar-all
+ ${docker.organization}/pulsar-all${project.basedir}latest
+ ${project.version}-${git.commit.id.abbrev}
+
+ target/apache-pulsar-io-connectors-${project.version}-bin
+ target/pulsar-offloader-distribution-${project.version}-bin.tar.gz
+ ${docker.organization}/pulsar:${project.version}-${git.commit.id.abbrev}
+
+
+
+ ${docker.platforms}
+
+
- latest
- ${docker.organization}
@@ -161,5 +177,29 @@
+
+
+ docker-push
+
+
+
+ io.fabric8
+ docker-maven-plugin
+
+
+ default
+ package
+
+ build
+ tag
+ push
+
+
+
+
+
+
+
+
diff --git a/docker/pulsar/Dockerfile b/docker/pulsar/Dockerfile
index 01e53e0152ac6..a5b294063d376 100644
--- a/docker/pulsar/Dockerfile
+++ b/docker/pulsar/Dockerfile
@@ -49,7 +49,7 @@ RUN chmod g+w /pulsar/trino
### Create 2nd stage from Ubuntu image
### and add OpenJDK and Python dependencies (for Pulsar functions)
-FROM ubuntu:20.04
+FROM ubuntu:22.04
ARG DEBIAN_FRONTEND=noninteractive
ARG UBUNTU_MIRROR=mirror://mirrors.ubuntu.com/mirrors.txt
diff --git a/docker/pulsar/pom.xml b/docker/pulsar/pom.xml
index 293d1daf8e208..85d86cff12523 100644
--- a/docker/pulsar/pom.xml
+++ b/docker/pulsar/pom.xml
@@ -23,7 +23,7 @@
org.apache.pulsardocker-images
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT4.0.0pulsar-docker-image
@@ -47,15 +47,25 @@
+
+ mirror://mirrors.ubuntu.com/mirrors.txt
+ http://security.ubuntu.com/ubuntu/
+
+
- docker
+ git-commit-id-no-git
+
+
+ ${basedir}/../../.git/index
+
+
- target/pulsar-server-distribution-${project.version}-bin.tar.gz
- ${pulsar.client.python.version}
- ${env.UBUNTU_MIRROR}
- ${env.UBUNTU_SECURITY_MIRROR}
+ no-git
+
+
+ docker
@@ -72,17 +82,27 @@
- pulsar
+ ${docker.organization}/pulsar
+
+ target/pulsar-server-distribution-${project.version}-bin.tar.gz
+ ${pulsar.client.python.version}
+ ${UBUNTU_MIRROR}
+ ${UBUNTU_SECURITY_MIRROR}
+ ${project.basedir}latest
+ ${project.version}-${git.commit.id.abbrev}
+
+
+ ${docker.platforms}
+
+
- latest
- ${docker.organization}
@@ -108,5 +128,29 @@
+
+
+ docker-push
+
+
+
+ io.fabric8
+ docker-maven-plugin
+
+
+ default
+ package
+
+ build
+ tag
+ push
+
+
+
+
+
+
+
+
diff --git a/jclouds-shaded/pom.xml b/jclouds-shaded/pom.xml
index 41c713c4f5d8f..dfb155c2d5a7d 100644
--- a/jclouds-shaded/pom.xml
+++ b/jclouds-shaded/pom.xml
@@ -26,7 +26,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/kafka-connect-avro-converter-shaded/pom.xml b/kafka-connect-avro-converter-shaded/pom.xml
deleted file mode 100644
index a907269086b57..0000000000000
--- a/kafka-connect-avro-converter-shaded/pom.xml
+++ /dev/null
@@ -1,118 +0,0 @@
-
-
-
- 4.0.0
-
- pulsar
- org.apache.pulsar
- 2.12.0-SNAPSHOT
- ..
-
-
- kafka-connect-avro-converter-shaded
- Apache Pulsar :: Kafka Connect Avro Converter shaded
-
-
-
-
- io.confluent
- kafka-connect-avro-converter
- ${confluent.version}
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
-
-
- ${shadePluginPhase}
-
- shade
-
-
-
-
- true
- true
-
-
-
- io.confluent:*
- io.confluent:kafka-avro-serializer
- io.confluent:kafka-schema-registry-client
- io.confluent:common-config
- io.confluent:common-utils
- org.apache.avro:*
-
- org.codehaus.jackson:jackson-core-asl
- org.codehaus.jackson:jackson-mapper-asl
- com.thoughtworks.paranamer:paranamer
- org.xerial.snappy:snappy-java
- org.apache.commons:commons-compress
- org.tukaani:xz
-
-
-
-
- io.confluent
- org.apache.pulsar.kafka.shade.io.confluent
-
-
- org.apache.avro
- org.apache.pulsar.kafka.shade.avro
-
-
- org.codehaus.jackson
- org.apache.pulsar.kafka.shade.org.codehaus.jackson
-
-
- com.thoughtworks.paranamer
- org.apache.pulsar.kafka.shade.com.thoughtworks.paranamer
-
-
- org.xerial.snappy
- org.apache.pulsar.kafka.shade.org.xerial.snappy
-
-
- org.apache.commons
- org.apache.pulsar.kafka.shade.org.apache.commons
-
-
- org.tukaani
- org.apache.pulsar.kafka.shade.org.tukaani
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/managed-ledger/pom.xml b/managed-ledger/pom.xml
index b0eab9a36bb6a..a8cb560b7b376 100644
--- a/managed-ledger/pom.xml
+++ b/managed-ledger/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java
index 42f92359f9a94..7ecb8f08d573d 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java
@@ -42,7 +42,7 @@ public interface LedgerOffloaderFactory {
boolean isDriverSupported(String driverName);
/**
- * Create a ledger offloader with the provided configuration, user-metadata and scheduler.
+ * Create a ledger offloader with the provided configuration, user-metadata and scheduler.
*
* @param offloadPolicies offload policies
* @param userMetadata user metadata
@@ -50,12 +50,29 @@ public interface LedgerOffloaderFactory {
* @return the offloader instance
* @throws IOException when fail to create an offloader
*/
+ T create(OffloadPoliciesImpl offloadPolicies,
+ Map userMetadata,
+ OrderedScheduler scheduler)
+ throws IOException;
+
+
+ /**
+ * Create a ledger offloader with the provided configuration, user-metadata, scheduler and offloaderStats.
+ *
+ * @param offloadPolicies offload policies
+ * @param userMetadata user metadata
+ * @param scheduler scheduler
+ * @param offloaderStats offloaderStats
+ * @return the offloader instance
+ * @throws IOException when fail to create an offloader
+ */
T create(OffloadPoliciesImpl offloadPolicies,
Map userMetadata,
OrderedScheduler scheduler,
LedgerOffloaderStats offloaderStats)
throws IOException;
+
/**
* Create a ledger offloader with the provided configuration, user-metadata, schema storage and scheduler.
*
@@ -66,6 +83,26 @@ T create(OffloadPoliciesImpl offloadPolicies,
* @return the offloader instance
* @throws IOException when fail to create an offloader
*/
+ default T create(OffloadPoliciesImpl offloadPolicies,
+ Map userMetadata,
+ SchemaStorage schemaStorage,
+ OrderedScheduler scheduler)
+ throws IOException {
+ return create(offloadPolicies, userMetadata, scheduler);
+ }
+
+ /**
+ * Create a ledger offloader with the provided configuration, user-metadata, schema storage,
+ * scheduler and offloaderStats.
+ *
+ * @param offloadPolicies offload policies
+ * @param userMetadata user metadata
+ * @param schemaStorage used for schema lookup in offloader
+ * @param scheduler scheduler
+ * @param offloaderStats offloaderStats
+ * @return the offloader instance
+ * @throws IOException when fail to create an offloader
+ */
default T create(OffloadPoliciesImpl offloadPolicies,
Map userMetadata,
SchemaStorage schemaStorage,
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java
index 0fe0f453347bf..eeac9cfcfa994 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java
@@ -20,9 +20,9 @@
import java.util.concurrent.TimeUnit;
-class LedgerOffloaderStatsDisable implements LedgerOffloaderStats {
+public class LedgerOffloaderStatsDisable implements LedgerOffloaderStats {
- static final LedgerOffloaderStats INSTANCE = new LedgerOffloaderStatsDisable();
+ public static final LedgerOffloaderStats INSTANCE = new LedgerOffloaderStatsDisable();
private LedgerOffloaderStatsDisable() {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java
index 7802ed07781ba..edbfa0b43204e 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java
@@ -786,6 +786,12 @@ Set extends Position> asyncReplayEntries(
*/
long getEstimatedSizeSinceMarkDeletePosition();
+ /**
+ * If a ledger is lost, it will be skipped when "autoSkipNonRecoverableData" is enabled, and this method is
+ * used to delete the information about that ledger held by the ManagedCursor.
+ */
+ default void skipNonRecoverableLedger(long ledgerId){}
+
/**
* Returns cursor throttle mark-delete rate.
*
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java
index 4ca56508891a1..c7dd8ea9129b7 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java
@@ -631,6 +631,12 @@ void asyncSetProperties(Map properties, AsyncCallbacks.UpdatePro
*/
void trimConsumedLedgersInBackground(CompletableFuture> promise);
+ /**
+ * If a ledger is lost, it will be skipped when "autoSkipNonRecoverableData" is enabled, and this method is
+ * used to delete the information about that ledger in the ManagedCursor.
+ */
+ default void skipNonRecoverableLedger(long ledgerId){}
+
/**
* Roll current ledger if it is full.
*/
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java
index 6e88a8e650d58..0c93a5b642cf6 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java
@@ -62,7 +62,7 @@ public class ManagedLedgerConfig {
private int ledgerRolloverTimeout = 4 * 3600;
private double throttleMarkDelete = 0;
private long retentionTimeMs = 0;
- private int retentionSizeInMB = 0;
+ private long retentionSizeInMB = 0;
private boolean autoSkipNonRecoverableData;
private boolean lazyCursorRecovery = false;
private long metadataOperationsTimeoutSeconds = 60;
@@ -396,7 +396,7 @@ public ManagedLedgerConfig setThrottleMarkDelete(double throttleMarkDelete) {
/**
* Set the retention time for the ManagedLedger.
*
- * Retention time and retention size ({@link #setRetentionSizeInMB(int)}) are together used to retain the
+ * Retention time and retention size ({@link #setRetentionSizeInMB(long)}) are together used to retain the
* ledger data when there are no cursors or when all the cursors have marked the data for deletion.
* Data will be deleted in this case when both retention time and retention size settings don't prevent deleting
* the data marked for deletion.
@@ -438,7 +438,7 @@ public long getRetentionTimeMillis() {
* @param retentionSizeInMB
* quota for message retention
*/
- public ManagedLedgerConfig setRetentionSizeInMB(int retentionSizeInMB) {
+ public ManagedLedgerConfig setRetentionSizeInMB(long retentionSizeInMB) {
this.retentionSizeInMB = retentionSizeInMB;
return this;
}
@@ -447,7 +447,7 @@ public ManagedLedgerConfig setRetentionSizeInMB(int retentionSizeInMB) {
* @return quota for message retention
*
*/
- public int getRetentionSizeInMB() {
+ public long getRetentionSizeInMB() {
return retentionSizeInMB;
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java
index 5aa4e8374d73a..386310b3ccbae 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java
@@ -39,7 +39,6 @@ public class ManagedLedgerFactoryConfig {
*/
private double cacheEvictionWatermark = 0.90;
- private int numManagedLedgerWorkerThreads = Runtime.getRuntime().availableProcessors();
private int numManagedLedgerSchedulerThreads = Runtime.getRuntime().availableProcessors();
/**
@@ -92,8 +91,30 @@ public class ManagedLedgerFactoryConfig {
*/
private String managedLedgerInfoCompressionType = MLDataFormats.CompressionType.NONE.name();
+ /**
+ * ManagedLedgerInfo compression threshold. If the original metadata size is below this threshold,
+ * compression will not be applied.
+ */
+ private long managedLedgerInfoCompressionThresholdInBytes = 0;
+
/**
* ManagedCursorInfo compression type. If the compression type is null or invalid, don't compress data.
*/
private String managedCursorInfoCompressionType = MLDataFormats.CompressionType.NONE.name();
+
+ /**
+ * ManagedCursorInfo compression threshold. If the original metadata size is below this threshold,
+ * compression will not be applied.
+ */
+ private long managedCursorInfoCompressionThresholdInBytes = 0;
+
+ public MetadataCompressionConfig getCompressionConfigForManagedLedgerInfo() {
+ return new MetadataCompressionConfig(managedLedgerInfoCompressionType,
+ managedLedgerInfoCompressionThresholdInBytes);
+ }
+
+ public MetadataCompressionConfig getCompressionConfigForManagedCursorInfo() {
+ return new MetadataCompressionConfig(managedCursorInfoCompressionType,
+ managedCursorInfoCompressionThresholdInBytes);
+ }
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java
index 94c2f61e00afe..50a3ffb157961 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java
@@ -100,6 +100,11 @@ public interface ManagedLedgerMXBean {
*/
long getReadEntriesErrors();
+ /**
+ * @return the rate of readEntries requests that missed the cache
+ */
+ double getReadEntriesOpsCacheMissesRate();
+
// Entry size statistics
double getEntrySizeAverage();
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/MetadataCompressionConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/MetadataCompressionConfig.java
new file mode 100644
index 0000000000000..601c270ab7680
--- /dev/null
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/MetadataCompressionConfig.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bookkeeper.mledger;
+
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.ToString;
+import org.apache.bookkeeper.mledger.proto.MLDataFormats;
+import org.apache.commons.lang.StringUtils;
+
+@Data
+@AllArgsConstructor
+@ToString
+public class MetadataCompressionConfig {
+ MLDataFormats.CompressionType compressionType;
+ long compressSizeThresholdInBytes;
+
+ public MetadataCompressionConfig(String compressionType) throws IllegalArgumentException {
+ this(compressionType, 0);
+ }
+
+ public MetadataCompressionConfig(String compressionType, long compressThreshold) throws IllegalArgumentException {
+ this.compressionType = parseCompressionType(compressionType);
+ this.compressSizeThresholdInBytes = compressThreshold;
+ }
+
+ public static MetadataCompressionConfig noCompression =
+ new MetadataCompressionConfig(MLDataFormats.CompressionType.NONE, 0);
+
+ private MLDataFormats.CompressionType parseCompressionType(String value) throws IllegalArgumentException {
+ if (StringUtils.isEmpty(value)) {
+ return MLDataFormats.CompressionType.NONE;
+ }
+
+ MLDataFormats.CompressionType compressionType;
+ compressionType = MLDataFormats.CompressionType.valueOf(value);
+
+ return compressionType;
+ }
+}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerMetadataUtils.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerMetadataUtils.java
index 8571a36584e2b..4ac409a2e9bfe 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerMetadataUtils.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerMetadataUtils.java
@@ -48,7 +48,9 @@ public final class LedgerMetadataUtils {
private static final String METADATA_PROPERTY_COMPACTEDTO = "pulsar/compactedTo";
private static final String METADATA_PROPERTY_SCHEMAID = "pulsar/schemaId";
- private static final String METADATA_PROPERTY_DELAYED_INDEX_BUCKETID = "pulsar/delayedIndexBucketId";
+ private static final String METADATA_PROPERTY_DELAYED_INDEX_BUCKET_KEY = "pulsar/delayedIndexBucketKey";
+ private static final String METADATA_PROPERTY_DELAYED_INDEX_TOPIC = "pulsar/delayedIndexTopic";
+ private static final String METADATA_PROPERTY_DELAYED_INDEX_CURSOR = "pulsar/delayedIndexCursor";
/**
* Build base metadata for every ManagedLedger.
@@ -108,14 +110,19 @@ public static Map buildMetadataForSchema(String schemaId) {
/**
* Build additional metadata for a delayed message index bucket.
*
- * @param bucketKey key of the delayed message bucket
+ * @param bucketKey key of the delayed message bucket
+ * @param topicName name of the topic
+ * @param cursorName name of the cursor
* @return an immutable map which describes the schema
*/
- public static Map buildMetadataForDelayedIndexBucket(String bucketKey) {
+ public static Map buildMetadataForDelayedIndexBucket(String bucketKey,
+ String topicName, String cursorName) {
return Map.of(
METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR,
METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_DELAYED_INDEX_BUCKET,
- METADATA_PROPERTY_DELAYED_INDEX_BUCKETID, bucketKey.getBytes(StandardCharsets.UTF_8)
+ METADATA_PROPERTY_DELAYED_INDEX_BUCKET_KEY, bucketKey.getBytes(StandardCharsets.UTF_8),
+ METADATA_PROPERTY_DELAYED_INDEX_TOPIC, topicName.getBytes(StandardCharsets.UTF_8),
+ METADATA_PROPERTY_DELAYED_INDEX_CURSOR, cursorName.getBytes(StandardCharsets.UTF_8)
);
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerOffloaderStatsImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerOffloaderStatsImpl.java
index be2895bf81867..5e05e4c8137cd 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerOffloaderStatsImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/LedgerOffloaderStatsImpl.java
@@ -276,17 +276,19 @@ public void run() {
}
@Override
- public void close() throws Exception {
+ public synchronized void close() throws Exception {
if (instance == this && this.closed.compareAndSet(false, true)) {
CollectorRegistry.defaultRegistry.unregister(this.offloadError);
CollectorRegistry.defaultRegistry.unregister(this.offloadRate);
CollectorRegistry.defaultRegistry.unregister(this.readLedgerLatency);
CollectorRegistry.defaultRegistry.unregister(this.writeStorageError);
CollectorRegistry.defaultRegistry.unregister(this.readOffloadError);
+ CollectorRegistry.defaultRegistry.unregister(this.readOffloadBytes);
CollectorRegistry.defaultRegistry.unregister(this.readOffloadRate);
CollectorRegistry.defaultRegistry.unregister(this.readOffloadIndexLatency);
CollectorRegistry.defaultRegistry.unregister(this.readOffloadDataLatency);
- this.offloadAndReadOffloadBytesMap.clear();
+ CollectorRegistry.defaultRegistry.unregister(this.deleteOffloadOps);
+ instance = null;
}
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java
index 34cd26274ffa0..f30f9553e15e0 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java
@@ -25,7 +25,6 @@
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.DEFAULT_LEDGER_DELETE_RETRIES;
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.createManagedLedgerException;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.collect.Collections2;
@@ -363,8 +362,7 @@ private CompletableFuture computeCursorProperties(
name, copy, lastCursorLedgerStat, new MetaStoreCallback<>() {
@Override
public void operationComplete(Void result, Stat stat) {
- log.info("[{}] Updated ledger cursor: {} properties {}", ledger.getName(),
- name, cursorProperties);
+ log.info("[{}] Updated ledger cursor: {}", ledger.getName(), name);
ManagedCursorImpl.this.cursorProperties = Collections.unmodifiableMap(newProperties);
updateCursorLedgerStat(copy, stat);
updateCursorPropertiesResult.complete(result);
@@ -373,7 +371,7 @@ public void operationComplete(Void result, Stat stat) {
@Override
public void operationFailed(MetaStoreException e) {
log.error("[{}] Error while updating ledger cursor: {} properties {}", ledger.getName(),
- name, cursorProperties, e);
+ name, newProperties, e);
updateCursorPropertiesResult.completeExceptionally(e);
}
});
@@ -1359,7 +1357,7 @@ public void asyncResetCursor(Position newPos, boolean forceReset, AsyncCallbacks
final PositionImpl newPosition = (PositionImpl) newPos;
// order trim and reset operations on a ledger
- ledger.getExecutor().execute(safeRun(() -> {
+ ledger.getExecutor().execute(() -> {
PositionImpl actualPosition = newPosition;
if (!ledger.isValidPosition(actualPosition)
@@ -1376,7 +1374,7 @@ public void asyncResetCursor(Position newPos, boolean forceReset, AsyncCallbacks
}
internalResetCursor(actualPosition, callback);
- }));
+ });
}
@Override
@@ -1476,9 +1474,8 @@ public Set extends Position> asyncReplayEntries(Set extends Position> positi
lock.readLock().lock();
try {
positions.stream()
- .filter(position -> individualDeletedMessages.contains(((PositionImpl) position).getLedgerId(),
- ((PositionImpl) position).getEntryId())
- || ((PositionImpl) position).compareTo(markDeletePosition) <= 0)
+ .filter(position -> ((PositionImpl) position).compareTo(markDeletePosition) <= 0
+ || individualDeletedMessages.contains(position.getLedgerId(), position.getEntryId()))
.forEach(alreadyAcknowledgedPositions::add);
} finally {
lock.readLock().unlock();
@@ -1781,7 +1778,6 @@ long getNumIndividualDeletedEntriesToSkip(long numEntries) {
} finally {
if (r.lowerEndpoint() instanceof PositionImplRecyclable) {
((PositionImplRecyclable) r.lowerEndpoint()).recycle();
- ((PositionImplRecyclable) r.upperEndpoint()).recycle();
}
}
}, recyclePositionRangeConverter);
@@ -2056,7 +2052,7 @@ void internalMarkDelete(final MarkDeleteEntry mdEntry) {
+ "is later.", mdEntry.newPosition, persistentMarkDeletePosition);
}
// run with executor to prevent deadlock
- ledger.getExecutor().execute(safeRun(() -> mdEntry.triggerComplete()));
+ ledger.getExecutor().execute(() -> mdEntry.triggerComplete());
return;
}
@@ -2075,7 +2071,7 @@ void internalMarkDelete(final MarkDeleteEntry mdEntry) {
+ "in progress {} is later.", mdEntry.newPosition, inProgressLatest);
}
// run with executor to prevent deadlock
- ledger.getExecutor().execute(safeRun(() -> mdEntry.triggerComplete()));
+ ledger.getExecutor().execute(() -> mdEntry.triggerComplete());
return;
}
@@ -2237,8 +2233,8 @@ public void asyncDelete(Iterable positions, AsyncCallbacks.DeleteCallb
return;
}
- if (individualDeletedMessages.contains(position.getLedgerId(), position.getEntryId())
- || position.compareTo(markDeletePosition) <= 0) {
+ if (position.compareTo(markDeletePosition) <= 0
+ || individualDeletedMessages.contains(position.getLedgerId(), position.getEntryId())) {
if (config.isDeletionAtBatchIndexLevelEnabled()) {
BitSetRecyclable bitSetRecyclable = batchDeletedIndexes.remove(position);
if (bitSetRecyclable != null) {
@@ -2612,8 +2608,8 @@ private boolean shouldPersistUnackRangesToLedger() {
private void persistPositionMetaStore(long cursorsLedgerId, PositionImpl position, Map properties,
MetaStoreCallback callback, boolean persistIndividualDeletedMessageRanges) {
if (state == State.Closed) {
- ledger.getExecutor().execute(safeRun(() -> callback.operationFailed(new MetaStoreException(
- new CursorAlreadyClosedException(name + " cursor already closed")))));
+ ledger.getExecutor().execute(() -> callback.operationFailed(new MetaStoreException(
+ new CursorAlreadyClosedException(name + " cursor already closed"))));
return;
}
@@ -2720,6 +2716,46 @@ void setReadPosition(Position newReadPositionInt) {
}
}
+ /**
+ * Manually acknowledge all entries in the lost ledger.
+ * - Since this is an uncommon event, we focus on maintainability. So we do not modify
+ * {@link #individualDeletedMessages} and {@link #batchDeletedIndexes}, but call
+ * {@link #asyncDelete(Position, AsyncCallbacks.DeleteCallback, Object)}.
+ * - This method is valid regardless of the consumer ACK type.
+ * - If there is a consumer ack request after this event, it will also work.
+ */
+ @Override
+ public void skipNonRecoverableLedger(final long ledgerId){
+ LedgerInfo ledgerInfo = ledger.getLedgersInfo().get(ledgerId);
+ if (ledgerInfo == null) {
+ return;
+ }
+ lock.writeLock().lock();
+ log.warn("[{}] [{}] Since the ledger [{}] is lost and the autoSkipNonRecoverableData is true, this ledger will"
+ + " be auto acknowledge in subscription", ledger.getName(), name, ledgerId);
+ try {
+ for (int i = 0; i < ledgerInfo.getEntries(); i++) {
+ if (!individualDeletedMessages.contains(ledgerId, i)) {
+ asyncDelete(PositionImpl.get(ledgerId, i), new AsyncCallbacks.DeleteCallback() {
+ @Override
+ public void deleteComplete(Object ctx) {
+ // ignore.
+ }
+
+ @Override
+ public void deleteFailed(ManagedLedgerException ex, Object ctx) {
+ // The method internalMarkDelete already handled the failure operation. We only need to
+ // make sure the memory state is updated.
+ // If the broker crashed, the non-recoverable ledger will be detected again.
+ }
+ }, null);
+ }
+ }
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
// //////////////////////////////////////////////////
void startCreatingNewMetadataLedger() {
@@ -2846,7 +2882,7 @@ private CompletableFuture doCreateNewMetadataLedger() {
return;
}
- ledger.getExecutor().execute(safeRun(() -> {
+ ledger.getExecutor().execute(() -> {
ledger.mbean.endCursorLedgerCreateOp();
if (rc != BKException.Code.OK) {
log.warn("[{}] Error creating ledger for cursor {}: {}", ledger.getName(), name,
@@ -2859,7 +2895,7 @@ private CompletableFuture doCreateNewMetadataLedger() {
log.debug("[{}] Created ledger {} for cursor {}", ledger.getName(), lh.getId(), name);
}
future.complete(lh);
- }));
+ });
}, LedgerMetadataUtils.buildAdditionalMetadataForCursor(name));
return future;
@@ -2912,6 +2948,7 @@ private List buildIndividualDeletedMessageRanges() {
lock.readLock().lock();
try {
if (individualDeletedMessages.isEmpty()) {
+ this.individualDeletedMessagesSerializedSize = 0;
return Collections.emptyList();
}
@@ -3192,7 +3229,7 @@ private void asyncDeleteLedger(final LedgerHandle lh, int retry) {
log.warn("[{}] Failed to delete ledger {}: {}", ledger.getName(), lh.getId(),
BKException.getMessage(rc));
if (!isNoSuchLedgerExistsException(rc)) {
- ledger.getScheduledExecutor().schedule(safeRun(() -> asyncDeleteLedger(lh, retry - 1)),
+ ledger.getScheduledExecutor().schedule(() -> asyncDeleteLedger(lh, retry - 1),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
}
return;
@@ -3227,7 +3264,7 @@ private void asyncDeleteCursorLedger(int retry) {
log.warn("[{}][{}] Failed to delete ledger {}: {}", ledger.getName(), name, cursorLedger.getId(),
BKException.getMessage(rc));
if (!isNoSuchLedgerExistsException(rc)) {
- ledger.getScheduledExecutor().schedule(safeRun(() -> asyncDeleteCursorLedger(retry - 1)),
+ ledger.getScheduledExecutor().schedule(() -> asyncDeleteCursorLedger(retry - 1),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
}
}
@@ -3305,9 +3342,8 @@ public LongPairRangeSet getIndividuallyDeletedMessagesSet() {
public boolean isMessageDeleted(Position position) {
checkArgument(position instanceof PositionImpl);
- return individualDeletedMessages.contains(((PositionImpl) position).getLedgerId(),
- ((PositionImpl) position).getEntryId())
- || ((PositionImpl) position).compareTo(markDeletePosition) <= 0;
+ return ((PositionImpl) position).compareTo(markDeletePosition) <= 0
+ || individualDeletedMessages.contains(position.getLedgerId(), position.getEntryId());
}
//this method will return a copy of the position's ack set
@@ -3399,7 +3435,7 @@ public Range getLastIndividualDeletedRange() {
@Override
public void trimDeletedEntries(List entries) {
entries.removeIf(entry -> {
- boolean isDeleted = ((PositionImpl) entry.getPosition()).compareTo(markDeletePosition) <= 0
+ boolean isDeleted = markDeletePosition.compareTo(entry.getLedgerId(), entry.getEntryId()) >= 0
|| individualDeletedMessages.contains(entry.getLedgerId(), entry.getEntryId());
if (isDeleted) {
entry.release();
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java
index 9f3fe9bb0c4a7..9107b76c88a28 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java
@@ -193,8 +193,9 @@ private ManagedLedgerFactoryImpl(MetadataStoreExtended metadataStore,
this.bookkeeperFactory = bookKeeperGroupFactory;
this.isBookkeeperManaged = isBookkeeperManaged;
this.metadataStore = metadataStore;
- this.store = new MetaStoreImpl(metadataStore, scheduledExecutor, config.getManagedLedgerInfoCompressionType(),
- config.getManagedCursorInfoCompressionType());
+ this.store = new MetaStoreImpl(metadataStore, scheduledExecutor,
+ config.getCompressionConfigForManagedLedgerInfo(),
+ config.getCompressionConfigForManagedCursorInfo());
this.config = config;
this.mbean = new ManagedLedgerFactoryMBeanImpl(this);
this.entryCacheManager = new RangeEntryCacheManagerImpl(this);
@@ -504,9 +505,19 @@ public void openReadOnlyManagedLedgerFailed(ManagedLedgerException exception, Ob
}
void close(ManagedLedger ledger) {
- // Remove the ledger from the internal factory cache
- ledgers.remove(ledger.getName());
- entryCacheManager.removeEntryCache(ledger.getName());
+        // If the future in the map is not done or has completed exceptionally, the given ledger is not
+        // the one cached in the map.
+ CompletableFuture ledgerFuture = ledgers.get(ledger.getName());
+ if (ledgerFuture == null || !ledgerFuture.isDone() || ledgerFuture.isCompletedExceptionally()){
+ return;
+ }
+ if (ledgerFuture.join() != ledger){
+ return;
+ }
+ // Remove the ledger from the internal factory cache.
+ if (ledgers.remove(ledger.getName(), ledgerFuture)) {
+ entryCacheManager.removeEntryCache(ledger.getName());
+ }
}
public CompletableFuture shutdownAsync() throws ManagedLedgerException {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
index 3120663f21a72..10f7948f553cb 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
@@ -22,7 +22,6 @@
import static com.google.common.base.Preconditions.checkState;
import static java.lang.Math.min;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BoundType;
import com.google.common.collect.Lists;
@@ -68,6 +67,7 @@
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
+import javax.annotation.Nullable;
import lombok.Getter;
import org.apache.bookkeeper.client.AsyncCallback;
import org.apache.bookkeeper.client.AsyncCallback.CreateCallback;
@@ -408,7 +408,7 @@ public void operationComplete(ManagedLedgerInfo mlInfo, Stat stat) {
if (!ledgers.isEmpty()) {
final long id = ledgers.lastKey();
OpenCallback opencb = (rc, lh, ctx1) -> {
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
mbean.endDataLedgerOpenOp();
if (log.isDebugEnabled()) {
log.debug("[{}] Opened ledger {}: {}", name, id, BKException.getMessage(rc));
@@ -438,7 +438,7 @@ public void operationComplete(ManagedLedgerInfo mlInfo, Stat stat) {
callback.initializeFailed(createManagedLedgerException(rc));
return;
}
- }));
+ });
};
if (log.isDebugEnabled()) {
@@ -465,16 +465,13 @@ public void operationFailed(MetaStoreException e) {
scheduleTimeoutTask();
}
- protected boolean isLedgersReadonly() {
- return false;
- }
-
protected synchronized void initializeBookKeeper(final ManagedLedgerInitializeLedgerCallback callback) {
if (log.isDebugEnabled()) {
log.debug("[{}] initializing bookkeeper; ledgers {}", name, ledgers);
}
// Calculate total entries and size
+ final List emptyLedgersToBeDeleted = Collections.synchronizedList(new ArrayList<>());
Iterator iterator = ledgers.values().iterator();
while (iterator.hasNext()) {
LedgerInfo li = iterator.next();
@@ -483,9 +480,7 @@ protected synchronized void initializeBookKeeper(final ManagedLedgerInitializeLe
TOTAL_SIZE_UPDATER.addAndGet(this, li.getSize());
} else {
iterator.remove();
- bookKeeper.asyncDeleteLedger(li.getLedgerId(), (rc, ctx) -> {
- log.info("[{}] Deleted empty ledger ledgerId={} rc={}", name, li.getLedgerId(), rc);
- }, null);
+ emptyLedgersToBeDeleted.add(li.getLedgerId());
}
}
@@ -501,6 +496,11 @@ protected synchronized void initializeBookKeeper(final ManagedLedgerInitializeLe
@Override
public void operationComplete(Void v, Stat stat) {
ledgersStat = stat;
+ emptyLedgersToBeDeleted.forEach(ledgerId -> {
+ bookKeeper.asyncDeleteLedger(ledgerId, (rc, ctx) -> {
+ log.info("[{}] Deleted empty ledger ledgerId={} rc={}", name, ledgerId, rc);
+ }, null);
+ });
initializeCursors(callback);
}
@@ -521,7 +521,7 @@ public void operationFailed(MetaStoreException e) {
return;
}
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
mbean.endDataLedgerCreateOp();
if (rc != BKException.Code.OK) {
callback.initializeFailed(createManagedLedgerException(rc));
@@ -550,7 +550,7 @@ public void operationFailed(MetaStoreException e) {
// Save it back to ensure all nodes exist
store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, storeLedgersCb);
- }));
+ });
}, ledgerMetadata);
}
@@ -773,10 +773,10 @@ public void asyncAddEntry(ByteBuf buffer, AddEntryCallback callback, Object ctx)
buffer.retain();
// Jump to specific thread to avoid contention from writers writing from different threads
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
OpAddEntry addOperation = OpAddEntry.createNoRetainBuffer(this, buffer, callback, ctx);
internalAsyncAddEntry(addOperation);
- }));
+ });
}
@Override
@@ -789,10 +789,10 @@ public void asyncAddEntry(ByteBuf buffer, int numberOfMessages, AddEntryCallback
buffer.retain();
// Jump to specific thread to avoid contention from writers writing from different threads
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
OpAddEntry addOperation = OpAddEntry.createNoRetainBuffer(this, buffer, numberOfMessages, callback, ctx);
internalAsyncAddEntry(addOperation);
- }));
+ });
}
protected synchronized void internalAsyncAddEntry(OpAddEntry addOperation) {
@@ -865,6 +865,13 @@ protected synchronized void internalAsyncAddEntry(OpAddEntry addOperation) {
lastAddEntryTimeMs = System.currentTimeMillis();
}
+ protected void afterFailedAddEntry(int numOfMessages) {
+ if (managedLedgerInterceptor == null) {
+ return;
+ }
+ managedLedgerInterceptor.afterFailedAddEntry(numOfMessages);
+ }
+
protected boolean beforeAddEntry(OpAddEntry addOperation) {
// if no interceptor, just return true to make sure addOperation will be initiate()
if (managedLedgerInterceptor == null) {
@@ -1521,6 +1528,15 @@ private void closeAllCursors(CloseCallback callback, final Object ctx) {
@Override
public synchronized void createComplete(int rc, final LedgerHandle lh, Object ctx) {
+ if (STATE_UPDATER.get(this) == State.Closed) {
+ if (lh != null) {
+ log.warn("[{}] ledger create completed after the managed ledger is closed rc={} ledger={}, so just"
+ + " close this ledger handle.", name, rc, lh != null ? lh.getId() : -1);
+ lh.closeAsync();
+ }
+ return;
+ }
+
if (log.isDebugEnabled()) {
log.debug("[{}] createComplete rc={} ledger={}", name, rc, lh != null ? lh.getId() : -1);
}
@@ -1554,16 +1570,17 @@ public void operationComplete(Void v, Stat stat) {
log.debug("[{}] Updating of ledgers list after create complete. version={}", name, stat);
}
ledgersStat = stat;
- ledgers.put(lh.getId(), newLedger);
- currentLedger = lh;
- currentLedgerEntries = 0;
- currentLedgerSize = 0;
- metadataMutex.unlock();
- updateLedgersIdsComplete();
synchronized (ManagedLedgerImpl.this) {
+ LedgerHandle originalCurrentLedger = currentLedger;
+ ledgers.put(lh.getId(), newLedger);
+ currentLedger = lh;
+ currentLedgerEntries = 0;
+ currentLedgerSize = 0;
+ updateLedgersIdsComplete(originalCurrentLedger);
mbean.addLedgerSwitchLatencySample(System.currentTimeMillis()
- lastLedgerCreationInitiationTimestamp, TimeUnit.MILLISECONDS);
}
+ metadataMutex.unlock();
// May need to update the cursor position
maybeUpdateCursorBeforeTrimmingConsumedLedger();
@@ -1650,8 +1667,15 @@ void createNewOpAddEntryForNewLedger() {
} while (existsOp != null && --pendingSize > 0);
}
- protected synchronized void updateLedgersIdsComplete() {
+ protected synchronized void updateLedgersIdsComplete(@Nullable LedgerHandle originalCurrentLedger) {
STATE_UPDATER.set(this, State.LedgerOpened);
+ // Delete original "currentLedger" if it has been removed from "ledgers".
+ if (originalCurrentLedger != null && !ledgers.containsKey(originalCurrentLedger.getId())){
+ bookKeeper.asyncDeleteLedger(originalCurrentLedger.getId(), (rc, ctx) -> {
+ mbean.endDataLedgerDeleteOp();
+ log.info("[{}] Delete complete for empty ledger {}. rc={}", name, originalCurrentLedger.getId(), rc);
+ }, null);
+ }
updateLastLedgerCreatedTimeAndScheduleRolloverTask();
if (log.isDebugEnabled()) {
@@ -1714,10 +1738,6 @@ synchronized void ledgerClosed(final LedgerHandle lh) {
// The last ledger was empty, so we can discard it
ledgers.remove(lh.getId());
mbean.startDataLedgerDeleteOp();
- bookKeeper.asyncDeleteLedger(lh.getId(), (rc, ctx) -> {
- mbean.endDataLedgerDeleteOp();
- log.info("[{}] Delete complete for empty ledger {}. rc={}", name, lh.getId(), rc);
- }, null);
}
trimConsumedLedgersInBackground();
@@ -1730,6 +1750,13 @@ synchronized void ledgerClosed(final LedgerHandle lh) {
}
}
+ @Override
+ public void skipNonRecoverableLedger(long ledgerId){
+ for (ManagedCursor managedCursor : cursors) {
+ managedCursor.skipNonRecoverableLedger(ledgerId);
+ }
+ }
+
synchronized void createLedgerAfterClosed() {
if (isNeededCreateNewLedgerAfterCloseLedger()) {
log.info("[{}] Creating a new ledger after closed", name);
@@ -1764,22 +1791,26 @@ public void closeComplete(int rc, LedgerHandle lh, Object o) {
+ "acked ledgerId %s", currentLedger.getId(), lh.getId());
if (rc == BKException.Code.OK) {
- log.debug("Successfully closed ledger {}", lh.getId());
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] Successfully closed ledger {}, trigger by rollover full ledger",
+ name, lh.getId());
+ }
} else {
- log.warn("Error when closing ledger {}. Status={}", lh.getId(), BKException.getMessage(rc));
+ log.warn("[{}] Error when closing ledger {}, trigger by rollover full ledger, Status={}",
+ name, lh.getId(), BKException.getMessage(rc));
}
ledgerClosed(lh);
createLedgerAfterClosed();
}
- }, System.nanoTime());
+ }, null);
}
}
@Override
public CompletableFuture asyncFindPosition(Predicate predicate) {
- CompletableFuture future = new CompletableFuture();
+ CompletableFuture future = new CompletableFuture<>();
Long firstLedgerId = ledgers.firstKey();
final PositionImpl startPosition = firstLedgerId == null ? null : new PositionImpl(firstLedgerId, 0);
if (startPosition == null) {
@@ -1792,11 +1823,6 @@ public void findEntryComplete(Position position, Object ctx) {
final Position finalPosition;
if (position == null) {
finalPosition = startPosition;
- if (finalPosition == null) {
- log.warn("[{}] Unable to find position for predicate {}.", name, predicate);
- future.complete(null);
- return;
- }
log.info("[{}] Unable to find position for predicate {}. Use the first position {} instead.", name,
predicate, startPosition);
} else {
@@ -2367,7 +2393,7 @@ void notifyCursors() {
break;
}
- executor.execute(safeRun(waitingCursor::notifyEntriesAvailable));
+ executor.execute(waitingCursor::notifyEntriesAvailable);
}
}
@@ -2378,7 +2404,7 @@ void notifyWaitingEntryCallBacks() {
break;
}
- executor.execute(safeRun(cb::entriesAvailable));
+ executor.execute(cb::entriesAvailable);
}
}
@@ -2425,16 +2451,16 @@ private void trimConsumedLedgersInBackground() {
@Override
public void trimConsumedLedgersInBackground(CompletableFuture> promise) {
- executor.execute(safeRun(() -> internalTrimConsumedLedgers(promise)));
+ executor.execute(() -> internalTrimConsumedLedgers(promise));
}
public void trimConsumedLedgersInBackground(boolean isTruncate, CompletableFuture> promise) {
- executor.execute(safeRun(() -> internalTrimLedgers(isTruncate, promise)));
+ executor.execute(() -> internalTrimLedgers(isTruncate, promise));
}
private void scheduleDeferredTrimming(boolean isTruncate, CompletableFuture> promise) {
- scheduledExecutor.schedule(safeRun(() -> trimConsumedLedgersInBackground(isTruncate, promise)), 100,
- TimeUnit.MILLISECONDS);
+ scheduledExecutor.schedule(() -> trimConsumedLedgersInBackground(isTruncate, promise),
+ 100, TimeUnit.MILLISECONDS);
}
private void maybeOffloadInBackground(CompletableFuture promise) {
@@ -2449,7 +2475,7 @@ private void maybeOffloadInBackground(CompletableFuture promise) {
final long offloadThresholdInSeconds =
Optional.ofNullable(policies.getManagedLedgerOffloadThresholdInSeconds()).orElse(-1L);
if (offloadThresholdInBytes >= 0 || offloadThresholdInSeconds >= 0) {
- executor.execute(safeRun(() -> maybeOffload(offloadThresholdInBytes, offloadThresholdInSeconds, promise)));
+ executor.execute(() -> maybeOffload(offloadThresholdInBytes, offloadThresholdInSeconds, promise));
}
}
@@ -2470,7 +2496,7 @@ private void maybeOffload(long offloadThresholdInBytes, long offloadThresholdInS
}
if (!offloadMutex.tryLock()) {
- scheduledExecutor.schedule(safeRun(() -> maybeOffloadInBackground(finalPromise)),
+ scheduledExecutor.schedule(() -> maybeOffloadInBackground(finalPromise),
100, TimeUnit.MILLISECONDS);
return;
}
@@ -2949,7 +2975,7 @@ private void asyncDeleteLedger(long ledgerId, long retry) {
log.warn("[{}] Ledger was already deleted {}", name, ledgerId);
} else if (rc != BKException.Code.OK) {
log.error("[{}] Error deleting ledger {} : {}", name, ledgerId, BKException.getMessage(rc));
- scheduledExecutor.schedule(safeRun(() -> asyncDeleteLedger(ledgerId, retry - 1)),
+ scheduledExecutor.schedule(() -> asyncDeleteLedger(ledgerId, retry - 1),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
} else {
if (log.isDebugEnabled()) {
@@ -3253,7 +3279,7 @@ private void tryTransformLedgerInfo(long ledgerId, LedgerInfoTransformation tran
if (!metadataMutex.tryLock()) {
// retry in 100 milliseconds
scheduledExecutor.schedule(
- safeRun(() -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise)), 100,
+ () -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise), 100,
TimeUnit.MILLISECONDS);
} else { // lock acquired
CompletableFuture unlockingPromise = new CompletableFuture<>();
@@ -4004,9 +4030,8 @@ private void scheduleTimeoutTask() {
timeoutSec = timeoutSec <= 0
? Math.max(config.getAddEntryTimeoutSeconds(), config.getReadEntryTimeoutSeconds())
: timeoutSec;
- this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(safeRun(() -> {
- checkTimeouts();
- }), timeoutSec, timeoutSec, TimeUnit.SECONDS);
+ this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(
+ this::checkTimeouts, timeoutSec, timeoutSec, TimeUnit.SECONDS);
}
}
@@ -4329,7 +4354,7 @@ protected void updateLastLedgerCreatedTimeAndScheduleRolloverTask() {
checkLedgerRollTask.cancel(true);
}
this.checkLedgerRollTask = this.scheduledExecutor.schedule(
- safeRun(this::rollCurrentLedgerIfFull), this.maximumRolloverTimeMs, TimeUnit.MILLISECONDS);
+ this::rollCurrentLedgerIfFull, this.maximumRolloverTimeMs, TimeUnit.MILLISECONDS);
}
}
@@ -4348,7 +4373,26 @@ public void checkInactiveLedgerAndRollOver() {
long currentTimeMs = System.currentTimeMillis();
if (inactiveLedgerRollOverTimeMs > 0 && currentTimeMs > (lastAddEntryTimeMs + inactiveLedgerRollOverTimeMs)) {
log.info("[{}] Closing inactive ledger, last-add entry {}", name, lastAddEntryTimeMs);
- ledgerClosed(currentLedger);
+ if (STATE_UPDATER.compareAndSet(this, State.LedgerOpened, State.ClosingLedger)) {
+ LedgerHandle currentLedger = this.currentLedger;
+ currentLedger.asyncClose((rc, lh, o) -> {
+ checkArgument(currentLedger.getId() == lh.getId(), "ledgerId %s doesn't match with "
+ + "acked ledgerId %s", currentLedger.getId(), lh.getId());
+
+ if (rc == BKException.Code.OK) {
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] Successfully closed ledger {}, trigger by inactive ledger check",
+ name, lh.getId());
+ }
+ } else {
+ log.warn("[{}] Error when closing ledger {}, trigger by inactive ledger check, Status={}",
+ name, lh.getId(), BKException.getMessage(rc));
+ }
+
+ ledgerClosed(lh);
+                    // we do not create a new ledger here, since the topic has been inactive for a long time.
+ }, null);
+ }
}
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java
index dad101c9b72d1..e057dee99538e 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java
@@ -39,6 +39,7 @@ public class ManagedLedgerMBeanImpl implements ManagedLedgerMXBean {
private final Rate addEntryOpsFailed = new Rate();
private final Rate readEntriesOps = new Rate();
private final Rate readEntriesOpsFailed = new Rate();
+ private final Rate readEntriesOpsCacheMisses = new Rate();
private final Rate markDeleteOps = new Rate();
private final LongAdder dataLedgerOpenOp = new LongAdder();
@@ -72,6 +73,7 @@ public void refreshStats(long period, TimeUnit unit) {
addEntryOpsFailed.calculateRate(seconds);
readEntriesOps.calculateRate(seconds);
readEntriesOpsFailed.calculateRate(seconds);
+ readEntriesOpsCacheMisses.calculateRate(seconds);
markDeleteOps.calculateRate(seconds);
addEntryLatencyStatsUsec.refresh();
@@ -98,6 +100,10 @@ public void recordReadEntriesError() {
readEntriesOpsFailed.recordEvent();
}
+ public void recordReadEntriesOpsCacheMisses(int count, long totalSize) {
+ readEntriesOpsCacheMisses.recordMultipleEvents(count, totalSize);
+ }
+
public void addAddEntryLatencySample(long latency, TimeUnit unit) {
addEntryLatencyStatsUsec.addValue(unit.toMicros(latency));
}
@@ -228,6 +234,11 @@ public long getReadEntriesErrors() {
return readEntriesOpsFailed.getCount();
}
+ @Override
+ public double getReadEntriesOpsCacheMissesRate() {
+ return readEntriesOpsCacheMisses.getRate();
+ }
+
@Override
public double getMarkDeleteRate() {
return markDeleteOps.getRate();
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java
index bcb73553324dd..d9269ec83b179 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java
@@ -23,7 +23,6 @@
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -38,15 +37,15 @@
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.MetaStoreException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.MetadataNotFoundException;
+import org.apache.bookkeeper.mledger.MetadataCompressionConfig;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo;
-import org.apache.bookkeeper.util.SafeRunnable;
-import org.apache.commons.lang.StringUtils;
import org.apache.pulsar.common.allocator.PulsarByteBufAllocator;
import org.apache.pulsar.common.compression.CompressionCodec;
import org.apache.pulsar.common.compression.CompressionCodecProvider;
+import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.metadata.api.MetadataStore;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.Notification;
@@ -63,50 +62,35 @@ public class MetaStoreImpl implements MetaStore, Consumer {
private final OrderedExecutor executor;
private static final int MAGIC_MANAGED_INFO_METADATA = 0x4778; // 0100 0111 0111 1000
- private final CompressionType ledgerInfoCompressionType;
- private final CompressionType cursorInfoCompressionType;
+ private final MetadataCompressionConfig ledgerInfoCompressionConfig;
+ private final MetadataCompressionConfig cursorInfoCompressionConfig;
private final Map> managedLedgerInfoUpdateCallbackMap;
public MetaStoreImpl(MetadataStore store, OrderedExecutor executor) {
this.store = store;
this.executor = executor;
- this.ledgerInfoCompressionType = CompressionType.NONE;
- this.cursorInfoCompressionType = CompressionType.NONE;
+ this.ledgerInfoCompressionConfig = MetadataCompressionConfig.noCompression;
+ this.cursorInfoCompressionConfig = MetadataCompressionConfig.noCompression;
managedLedgerInfoUpdateCallbackMap = new ConcurrentHashMap<>();
if (store != null) {
store.registerListener(this);
}
}
- public MetaStoreImpl(MetadataStore store, OrderedExecutor executor, String ledgerInfoCompressionType,
- String cursorInfoCompressionType) {
+ public MetaStoreImpl(MetadataStore store, OrderedExecutor executor,
+ MetadataCompressionConfig ledgerInfoCompressionConfig,
+ MetadataCompressionConfig cursorInfoCompressionConfig) {
this.store = store;
this.executor = executor;
- this.ledgerInfoCompressionType = parseCompressionType(ledgerInfoCompressionType);
- this.cursorInfoCompressionType = parseCompressionType(cursorInfoCompressionType);
+ this.ledgerInfoCompressionConfig = ledgerInfoCompressionConfig;
+ this.cursorInfoCompressionConfig = cursorInfoCompressionConfig;
managedLedgerInfoUpdateCallbackMap = new ConcurrentHashMap<>();
if (store != null) {
store.registerListener(this);
}
}
- private CompressionType parseCompressionType(String value) {
- if (StringUtils.isEmpty(value)) {
- return CompressionType.NONE;
- }
-
- CompressionType compressionType;
- try {
- compressionType = CompressionType.valueOf(value);
- } catch (Exception e) {
- log.error("Failed to get compression type {} error msg: {}.", value, e.getMessage());
- throw e;
- }
-
- return compressionType;
- }
-
@Override
public void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, Map properties,
MetaStoreCallback callback) {
@@ -155,7 +139,7 @@ public void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, Map
.exceptionally(ex -> {
try {
executor.executeOrdered(ledgerName,
- SafeRunnable.safeRun(() -> callback.operationFailed(getException(ex))));
+ () -> callback.operationFailed(getException(ex)));
} catch (RejectedExecutionException e) {
//executor maybe shutdown, use common pool to run callback.
CompletableFuture.runAsync(() -> callback.operationFailed(getException(ex)));
@@ -182,7 +166,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat)
@Override
public void operationFailed(MetaStoreException e) {
if (e instanceof MetadataNotFoundException) {
- result.complete(Collections.emptyMap());
+ result.complete(new HashMap<>());
} else {
result.completeExceptionally(e);
}
@@ -203,8 +187,8 @@ public void asyncUpdateLedgerIds(String ledgerName, ManagedLedgerInfo mlInfo, St
.thenAcceptAsync(newVersion -> callback.operationComplete(null, newVersion),
executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -220,8 +204,8 @@ public void getCursors(String ledgerName, MetaStoreCallback> callba
.thenAcceptAsync(cursors -> callback.operationComplete(cursors, null), executor
.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -248,8 +232,8 @@ public void asyncGetCursorInfo(String ledgerName, String cursorName,
}
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -283,8 +267,8 @@ public void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedC
.thenAcceptAsync(optStat -> callback.operationComplete(null, optStat), executor
.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -292,7 +276,7 @@ public void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedC
@Override
public void asyncRemoveCursor(String ledgerName, String cursorName, MetaStoreCallback callback) {
String path = PREFIX + ledgerName + "/" + cursorName;
- log.info("[{}] Remove consumer={}", ledgerName, cursorName);
+ log.info("[{}] Remove cursor={}", ledgerName, cursorName);
store.delete(path, Optional.empty())
.thenAcceptAsync(v -> {
@@ -302,8 +286,15 @@ public void asyncRemoveCursor(String ledgerName, String cursorName, MetaStoreCal
callback.operationComplete(null, null);
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName, () -> {
+ Throwable actEx = FutureUtil.unwrapCompletionException(ex);
+ if (actEx instanceof MetadataStoreException.NotFoundException){
+ log.info("[{}] [{}] cursor delete done because it did not exist.", ledgerName, cursorName);
+ callback.operationComplete(null, null);
+ return;
+ }
+ callback.operationFailed(getException(ex));
+ });
return null;
});
}
@@ -321,8 +312,8 @@ public void removeManagedLedger(String ledgerName, MetaStoreCallback callb
callback.operationComplete(null, null);
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -415,29 +406,43 @@ private static MetaStoreException getException(Throwable t) {
}
public byte[] compressLedgerInfo(ManagedLedgerInfo managedLedgerInfo) {
- if (ledgerInfoCompressionType.equals(CompressionType.NONE)) {
+ CompressionType compressionType = ledgerInfoCompressionConfig.getCompressionType();
+ if (compressionType.equals(CompressionType.NONE)) {
return managedLedgerInfo.toByteArray();
}
- MLDataFormats.ManagedLedgerInfoMetadata mlInfoMetadata = MLDataFormats.ManagedLedgerInfoMetadata
- .newBuilder()
- .setCompressionType(ledgerInfoCompressionType)
- .setUncompressedSize(managedLedgerInfo.getSerializedSize())
- .build();
- return compressManagedInfo(managedLedgerInfo.toByteArray(), mlInfoMetadata.toByteArray(),
- mlInfoMetadata.getSerializedSize(), ledgerInfoCompressionType);
+
+ int uncompressedSize = managedLedgerInfo.getSerializedSize();
+ if (uncompressedSize > ledgerInfoCompressionConfig.getCompressSizeThresholdInBytes()) {
+ MLDataFormats.ManagedLedgerInfoMetadata mlInfoMetadata = MLDataFormats.ManagedLedgerInfoMetadata
+ .newBuilder()
+ .setCompressionType(compressionType)
+ .setUncompressedSize(uncompressedSize)
+ .build();
+ return compressManagedInfo(managedLedgerInfo.toByteArray(), mlInfoMetadata.toByteArray(),
+ mlInfoMetadata.getSerializedSize(), compressionType);
+ }
+
+ return managedLedgerInfo.toByteArray();
}
public byte[] compressCursorInfo(ManagedCursorInfo managedCursorInfo) {
- if (cursorInfoCompressionType.equals(CompressionType.NONE)) {
+ CompressionType compressionType = cursorInfoCompressionConfig.getCompressionType();
+ if (compressionType.equals(CompressionType.NONE)) {
return managedCursorInfo.toByteArray();
}
- MLDataFormats.ManagedCursorInfoMetadata metadata = MLDataFormats.ManagedCursorInfoMetadata
- .newBuilder()
- .setCompressionType(cursorInfoCompressionType)
- .setUncompressedSize(managedCursorInfo.getSerializedSize())
- .build();
- return compressManagedInfo(managedCursorInfo.toByteArray(), metadata.toByteArray(),
- metadata.getSerializedSize(), cursorInfoCompressionType);
+
+ int uncompressedSize = managedCursorInfo.getSerializedSize();
+ if (uncompressedSize > cursorInfoCompressionConfig.getCompressSizeThresholdInBytes()) {
+ MLDataFormats.ManagedCursorInfoMetadata metadata = MLDataFormats.ManagedCursorInfoMetadata
+ .newBuilder()
+ .setCompressionType(compressionType)
+ .setUncompressedSize(uncompressedSize)
+ .build();
+ return compressManagedInfo(managedCursorInfo.toByteArray(), metadata.toByteArray(),
+ metadata.getSerializedSize(), compressionType);
+ }
+
+ return managedCursorInfo.toByteArray();
}
public ManagedLedgerInfo parseManagedLedgerInfo(byte[] data) throws InvalidProtocolBufferException {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java
index 5ac8260c7920c..9d2829b1707f4 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java
@@ -75,7 +75,7 @@ private void recoverCursor(PositionImpl mdPosition) {
// Initialize the counter such that the difference between the messages written on the ML and the
// messagesConsumed is equal to the current backlog (negated).
if (null != this.readPosition) {
- long initialBacklog = readPosition.compareTo(lastEntryAndCounter.getLeft()) < 0
+ long initialBacklog = readPosition.compareTo(lastEntryAndCounter.getLeft()) <= 0
? ledger.getNumberOfEntries(Range.closed(readPosition, lastEntryAndCounter.getLeft())) : 0;
messagesConsumedCounter = lastEntryAndCounter.getRight() - initialBacklog;
} else {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java
index 14135b037920a..ae2beafb64374 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java
@@ -35,8 +35,6 @@
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.Position;
import org.apache.bookkeeper.mledger.intercept.ManagedLedgerInterceptor;
-import org.apache.bookkeeper.mledger.util.SafeRun;
-import org.apache.bookkeeper.util.SafeRunnable;
/**
@@ -44,7 +42,7 @@
*
*/
@Slf4j
-public class OpAddEntry extends SafeRunnable implements AddCallback, CloseCallback {
+public class OpAddEntry implements AddCallback, CloseCallback, Runnable {
protected ManagedLedgerImpl ml;
LedgerHandle ledger;
private long entryId;
@@ -125,17 +123,22 @@ public void setCloseWhenDone(boolean closeWhenDone) {
public void initiate() {
if (STATE_UPDATER.compareAndSet(OpAddEntry.this, State.OPEN, State.INITIATED)) {
-
ByteBuf duplicateBuffer = data.retainedDuplicate();
// internally asyncAddEntry() will take the ownership of the buffer and release it at the end
addOpCount = ManagedLedgerImpl.ADD_OP_COUNT_UPDATER.incrementAndGet(ml);
lastInitTime = System.nanoTime();
if (ml.getManagedLedgerInterceptor() != null) {
+ long originalDataLen = data.readableBytes();
payloadProcessorHandle = ml.getManagedLedgerInterceptor().processPayloadBeforeLedgerWrite(this,
duplicateBuffer);
if (payloadProcessorHandle != null) {
duplicateBuffer = payloadProcessorHandle.getProcessedPayload();
+ // If data len of entry changes, correct "dataLength" and "currentLedgerSize".
+ if (originalDataLen != duplicateBuffer.readableBytes()) {
+ this.dataLength = duplicateBuffer.readableBytes();
+ this.ml.currentLedgerSize += (dataLength - originalDataLen);
+ }
}
}
ledger.asyncAddEntry(duplicateBuffer, this, addOpCount);
@@ -157,6 +160,7 @@ public void initiateShadowWrite() {
public void failed(ManagedLedgerException e) {
AddEntryCallback cb = callbackUpdater.getAndSet(this, null);
+ ml.afterFailedAddEntry(this.getNumberOfMessages());
if (cb != null) {
ReferenceCountUtil.release(data);
cb.addFailed(e, ctx);
@@ -206,7 +210,7 @@ public void addComplete(int rc, final LedgerHandle lh, long entryId, Object ctx)
// Called in executor hashed on managed ledger name, once the add operation is complete
@Override
- public void safeRun() {
+ public void run() {
if (payloadProcessorHandle != null) {
payloadProcessorHandle.release();
}
@@ -322,11 +326,11 @@ void handleAddFailure(final LedgerHandle lh) {
ManagedLedgerImpl finalMl = this.ml;
finalMl.mbean.recordAddEntryError();
- finalMl.getExecutor().execute(SafeRun.safeRun(() -> {
+ finalMl.getExecutor().execute(() -> {
// Force the creation of a new ledger. Doing it in a background thread to avoid acquiring ML lock
// from a BK callback.
finalMl.ledgerClosed(lh);
- }));
+ });
}
void close() {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java
index 81b14359514b9..7b59c3903d5bc 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java
@@ -18,7 +18,6 @@
*/
package org.apache.bookkeeper.mledger.impl;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import java.util.ArrayList;
@@ -108,18 +107,20 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
if (!entries.isEmpty()) {
// There were already some entries that were read before, we can return them
- cursor.ledger.getExecutor().execute(safeRun(() -> {
+ cursor.ledger.getExecutor().execute(() -> {
callback.readEntriesComplete(entries, ctx);
recycle();
- }));
+ });
} else if (cursor.config.isAutoSkipNonRecoverableData() && exception instanceof NonRecoverableLedgerException) {
log.warn("[{}][{}] read failed from ledger at position:{} : {}", cursor.ledger.getName(), cursor.getName(),
readPosition, exception.getMessage());
final ManagedLedgerImpl ledger = (ManagedLedgerImpl) cursor.getManagedLedger();
Position nexReadPosition;
+ Long lostLedger = null;
if (exception instanceof ManagedLedgerException.LedgerNotExistException) {
// try to find and move to next valid ledger
nexReadPosition = cursor.getNextLedgerPosition(readPosition.getLedgerId());
+ lostLedger = readPosition.ledgerId;
} else {
// Skip this read operation
nexReadPosition = ledger.getValidPositionAfterSkippedEntries(readPosition, count);
@@ -132,6 +133,9 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
return;
}
updateReadPosition(nexReadPosition);
+ if (lostLedger != null) {
+ cursor.getManagedLedger().skipNonRecoverableLedger(lostLedger);
+ }
checkReadCompletion();
} else {
if (!(exception instanceof TooManyRequestsException)) {
@@ -161,20 +165,20 @@ void checkReadCompletion() {
&& maxPosition.compareTo(readPosition) > 0) {
// We still have more entries to read from the next ledger, schedule a new async operation
- cursor.ledger.getExecutor().execute(safeRun(() -> {
+ cursor.ledger.getExecutor().execute(() -> {
readPosition = cursor.ledger.startReadOperationOnLedger(nextReadPosition);
cursor.ledger.asyncReadEntries(OpReadEntry.this);
- }));
+ });
} else {
// The reading was already completed, release resources and trigger callback
try {
cursor.readOperationCompleted();
} finally {
- cursor.ledger.getExecutor().execute(safeRun(() -> {
+ cursor.ledger.getExecutor().execute(() -> {
callback.readEntriesComplete(entries, ctx);
recycle();
- }));
+ });
}
}
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java
index 39e7b6b42ec0b..b33dd87543f77 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java
@@ -19,7 +19,6 @@
package org.apache.bookkeeper.mledger.impl;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -30,6 +29,7 @@
import org.apache.bookkeeper.client.AsyncCallback;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;
+import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.apache.bookkeeper.mledger.AsyncCallbacks;
import org.apache.bookkeeper.mledger.ManagedLedgerConfig;
@@ -65,13 +65,13 @@ public ShadowManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper book
@Override
synchronized void initialize(ManagedLedgerInitializeLedgerCallback callback, Object ctx) {
log.info("Opening shadow managed ledger {} with source={}", name, sourceMLName);
- executor.execute(safeRun(() -> doInitialize(callback, ctx)));
+ executor.execute(() -> doInitialize(callback, ctx));
}
private void doInitialize(ManagedLedgerInitializeLedgerCallback callback, Object ctx) {
// Fetch the list of existing ledgers in the source managed ledger
store.watchManagedLedgerInfo(sourceMLName, (managedLedgerInfo, stat) ->
- executor.execute(safeRun(() -> processSourceManagedLedgerInfo(managedLedgerInfo, stat)))
+ executor.execute(() -> processSourceManagedLedgerInfo(managedLedgerInfo, stat))
);
store.getManagedLedgerInfo(sourceMLName, false, null, new MetaStore.MetaStoreCallback<>() {
@Override
@@ -105,7 +105,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat)
final long lastLedgerId = ledgers.lastKey();
mbean.startDataLedgerOpenOp();
- AsyncCallback.OpenCallback opencb = (rc, lh, ctx1) -> executor.execute(safeRun(() -> {
+ AsyncCallback.OpenCallback opencb = (rc, lh, ctx1) -> executor.execute(() -> {
mbean.endDataLedgerOpenOp();
if (log.isDebugEnabled()) {
log.debug("[{}] Opened source ledger {}", name, lastLedgerId);
@@ -144,7 +144,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat)
BKException.getMessage(rc));
callback.initializeFailed(createManagedLedgerException(rc));
}
- }));
+ });
//open ledger in readonly mode.
bookKeeper.asyncOpenLedgerNoRecovery(lastLedgerId, digestType, config.getPassword(), opencb, null);
@@ -161,11 +161,6 @@ public void operationFailed(ManagedLedgerException.MetaStoreException e) {
});
}
- @Override
- protected boolean isLedgersReadonly() {
- return true;
- }
-
@Override
protected synchronized void initializeBookKeeper(ManagedLedgerInitializeLedgerCallback callback) {
if (log.isDebugEnabled()) {
@@ -321,7 +316,7 @@ private synchronized void processSourceManagedLedgerInfo(MLDataFormats.ManagedLe
mbean.startDataLedgerOpenOp();
//open ledger in readonly mode.
bookKeeper.asyncOpenLedgerNoRecovery(lastLedgerId, digestType, config.getPassword(),
- (rc, lh, ctx1) -> executor.execute(safeRun(() -> {
+ (rc, lh, ctx1) -> executor.execute(() -> {
mbean.endDataLedgerOpenOp();
if (log.isDebugEnabled()) {
log.debug("[{}] Opened new source ledger {}", name, lastLedgerId);
@@ -337,7 +332,7 @@ private synchronized void processSourceManagedLedgerInfo(MLDataFormats.ManagedLe
currentLedgerEntries = 0;
currentLedgerSize = 0;
initLastConfirmedEntry();
- updateLedgersIdsComplete();
+ updateLedgersIdsComplete(null);
maybeUpdateCursorBeforeTrimmingConsumedLedger();
} else if (isNoSuchLedgerExistsException(rc)) {
log.warn("[{}] Source ledger not found: {}", name, lastLedgerId);
@@ -346,7 +341,7 @@ private synchronized void processSourceManagedLedgerInfo(MLDataFormats.ManagedLe
log.error("[{}] Failed to open source ledger {}: {}", name, lastLedgerId,
BKException.getMessage(rc));
}
- })), null);
+ }), null);
}
//handle old ledgers deleted.
@@ -370,7 +365,7 @@ public synchronized void asyncClose(AsyncCallbacks.CloseCallback callback, Objec
}
@Override
- protected synchronized void updateLedgersIdsComplete() {
+ protected synchronized void updateLedgersIdsComplete(LedgerHandle originalCurrentLedger) {
STATE_UPDATER.set(this, State.LedgerOpened);
updateLastLedgerCreatedTimeAndScheduleRolloverTask();
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java
index 1c5563b38b120..d1050e0062826 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java
@@ -93,6 +93,7 @@ public void asyncReadEntry(ReadHandle lh, long firstEntry, long lastEntry, boole
} finally {
ledgerEntries.close();
}
+ ml.getMbean().recordReadEntriesOpsCacheMisses(entries.size(), totalSize);
ml.getFactory().getMbean().recordCacheMiss(entries.size(), totalSize);
ml.getMbean().addReadEntriesSample(entries.size(), totalSize);
@@ -120,6 +121,7 @@ public void asyncReadEntry(ReadHandle lh, PositionImpl position, AsyncCallbacks.
LedgerEntry ledgerEntry = iterator.next();
EntryImpl returnEntry = RangeEntryCacheManagerImpl.create(ledgerEntry, interceptor);
+ ml.getMbean().recordReadEntriesOpsCacheMisses(1, returnEntry.getLength());
ml.getFactory().getMbean().recordCacheMiss(1, returnEntry.getLength());
ml.getMbean().addReadEntriesSample(1, returnEntry.getLength());
callback.readEntryComplete(returnEntry, ctx);
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java
index 28a2f00cf683c..27aec6f178e39 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java
@@ -256,6 +256,7 @@ private void asyncReadEntry0(ReadHandle lh, PositionImpl position, final ReadEnt
LedgerEntry ledgerEntry = iterator.next();
EntryImpl returnEntry = RangeEntryCacheManagerImpl.create(ledgerEntry, interceptor);
+ ml.getMbean().recordReadEntriesOpsCacheMisses(1, returnEntry.getLength());
manager.mlFactoryMBean.recordCacheMiss(1, returnEntry.getLength());
ml.getMbean().addReadEntriesSample(1, returnEntry.getLength());
callback.readEntryComplete(returnEntry, ctx);
@@ -449,6 +450,7 @@ CompletableFuture> readFromStorage(ReadHandle lh,
}
}
+ ml.getMbean().recordReadEntriesOpsCacheMisses(entriesToReturn.size(), totalSize);
manager.mlFactoryMBean.recordCacheMiss(entriesToReturn.size(), totalSize);
ml.getMbean().addReadEntriesSample(entriesToReturn.size(), totalSize);
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java
index 080c70b5873cd..d5a3019855cb5 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java
@@ -18,7 +18,6 @@
*/
package org.apache.bookkeeper.mledger.impl.cache;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import java.util.concurrent.ConcurrentHashMap;
@@ -116,7 +115,7 @@ boolean hasSpaceInCache() {
// Trigger a single eviction in background. While the eviction is running we stop inserting entries in the cache
if (currentSize > evictionTriggerThreshold && evictionInProgress.compareAndSet(false, true)) {
- mlFactory.getScheduledExecutor().execute(safeRun(() -> {
+ mlFactory.getScheduledExecutor().execute(() -> {
// Trigger a new cache eviction cycle to bring the used memory below the cacheEvictionWatermark
// percentage limit
long sizeToEvict = currentSize - (long) (maxSize * cacheEvictionWatermark);
@@ -136,7 +135,7 @@ boolean hasSpaceInCache() {
mlFactoryMBean.recordCacheEviction();
evictionInProgress.set(false);
}
- }));
+ });
}
return currentSize < maxSize;
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/intercept/ManagedLedgerInterceptor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/intercept/ManagedLedgerInterceptor.java
index 412655594c770..d26a5e15735aa 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/intercept/ManagedLedgerInterceptor.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/intercept/ManagedLedgerInterceptor.java
@@ -41,6 +41,14 @@ public interface ManagedLedgerInterceptor {
*/
OpAddEntry beforeAddEntry(OpAddEntry op, int numberOfMessages);
+ /**
+ * Intercept When add entry failed.
+ * @param numberOfMessages
+ */
+ default void afterFailedAddEntry(int numberOfMessages){
+
+ }
+
/**
* Intercept when ManagedLedger is initialized.
* @param propertiesMap map of properties.
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/Futures.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/Futures.java
index dc1d1eb6c9ac5..f5ad77a71d8c4 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/Futures.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/Futures.java
@@ -24,6 +24,7 @@
import java.util.function.Supplier;
import org.apache.bookkeeper.mledger.AsyncCallbacks.CloseCallback;
import org.apache.bookkeeper.mledger.ManagedLedgerException;
+import org.apache.pulsar.common.util.FutureUtil;
/**
* Conveniences to use with {@link CompletableFuture}.
@@ -78,7 +79,8 @@ public static CompletableFuture executeWithRetry(Supplier 0) {
+ Throwable throwable = FutureUtil.unwrapCompletionException(ex);
+ if (needRetryExceptionClass.isAssignableFrom(throwable.getClass()) && maxRetryTimes > 0) {
executeWithRetry(op, needRetryExceptionClass, maxRetryTimes - 1).whenComplete((res2, ex2) -> {
if (ex2 == null) {
resultFuture.complete(res2);
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/PositionAckSetUtil.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/PositionAckSetUtil.java
index 336c3a69e45c1..1c607582076a8 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/PositionAckSetUtil.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/PositionAckSetUtil.java
@@ -45,12 +45,25 @@ public static void andAckSet(PositionImpl currentPosition, PositionImpl otherPos
if (currentPosition == null || otherPosition == null) {
return;
}
- BitSetRecyclable thisAckSet = BitSetRecyclable.valueOf(currentPosition.getAckSet());
- BitSetRecyclable otherAckSet = BitSetRecyclable.valueOf(otherPosition.getAckSet());
+ currentPosition.setAckSet(andAckSet(currentPosition.getAckSet(), otherPosition.getAckSet()));
+ }
+
+ //This method is do `and` operation for ack set
+ public static long[] andAckSet(long[] firstAckSet, long[] secondAckSet) {
+ BitSetRecyclable thisAckSet = BitSetRecyclable.valueOf(firstAckSet);
+ BitSetRecyclable otherAckSet = BitSetRecyclable.valueOf(secondAckSet);
thisAckSet.and(otherAckSet);
- currentPosition.setAckSet(thisAckSet.toLongArray());
+ long[] ackSet = thisAckSet.toLongArray();
thisAckSet.recycle();
otherAckSet.recycle();
+ return ackSet;
+ }
+
+ public static boolean isAckSetEmpty(long[] ackSet) {
+ BitSetRecyclable bitSet = BitSetRecyclable.create().resetWords(ackSet);
+ boolean isEmpty = bitSet.isEmpty();
+ bitSet.recycle();
+ return isEmpty;
}
//This method is compare two position which position is bigger than another one.
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/SafeRun.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/SafeRun.java
deleted file mode 100644
index 570cb7ae735ab..0000000000000
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/SafeRun.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.bookkeeper.mledger.util;
-
-import java.util.function.Consumer;
-import org.apache.bookkeeper.util.SafeRunnable;
-
-/**
- * Static builders for {@link SafeRunnable}s.
- */
-public class SafeRun {
- public static SafeRunnable safeRun(Runnable runnable) {
- return new SafeRunnable() {
- @Override
- public void safeRun() {
- runnable.run();
- }
- };
- }
-
- /**
- *
- * @param runnable
- * @param exceptionHandler
- * handler that will be called when there are any exception
- * @return
- */
- public static SafeRunnable safeRun(Runnable runnable, Consumer exceptionHandler) {
- return new SafeRunnable() {
- @Override
- public void safeRun() {
- try {
- runnable.run();
- } catch (Throwable t) {
- exceptionHandler.accept(t);
- throw t;
- }
- }
- };
- }
-}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java
index 3fa0234e13a55..7558f07db76ca 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java
@@ -18,7 +18,6 @@
*/
package org.apache.bookkeeper.mledger.impl;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNull;
@@ -383,7 +382,7 @@ public void testConcurrentIndividualDeletesWithGetNthEntry() throws Exception {
final AtomicInteger iteration = new AtomicInteger(0);
for (int i = 0; i < deleteEntries; i++) {
- executor.submit(safeRun(() -> {
+ executor.submit(() -> {
try {
cursor.asyncDelete(addedEntries.get(iteration.getAndIncrement()), new DeleteCallback() {
@Override
@@ -403,7 +402,7 @@ public void deleteFailed(ManagedLedgerException exception, Object ctx) {
} finally {
counter.countDown();
}
- }));
+ });
}
counter.await();
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java
index 08d8fd939a01d..70ba4b543ec09 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java
@@ -19,11 +19,13 @@
package org.apache.bookkeeper.mledger.impl;
import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
import static org.testng.Assert.expectThrows;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.mledger.MetadataCompressionConfig;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.pulsar.common.api.proto.CompressionType;
import org.testng.annotations.DataProvider;
@@ -49,16 +51,14 @@ private Object[][] compressionTypeProvider() {
};
}
- @Test(dataProvider = "compressionTypeProvider")
- public void testEncodeAndDecode(String compressionType) throws IOException {
- long ledgerId = 10000;
+ private MLDataFormats.ManagedCursorInfo.Builder generateManagedCursorInfo(long ledgerId, int positionNumber) {
MLDataFormats.ManagedCursorInfo.Builder builder = MLDataFormats.ManagedCursorInfo.newBuilder();
builder.setCursorsLedgerId(ledgerId);
builder.setMarkDeleteLedgerId(ledgerId);
List batchedEntryDeletionIndexInfos = new ArrayList<>();
- for (int i = 0; i < 1000; i++) {
+ for (int i = 0; i < positionNumber; i++) {
MLDataFormats.NestedPositionInfo nestedPositionInfo = MLDataFormats.NestedPositionInfo.newBuilder()
.setEntryId(i).setLedgerId(i).build();
MLDataFormats.BatchedEntryDeletionIndexInfo batchedEntryDeletionIndexInfo = MLDataFormats
@@ -67,17 +67,24 @@ public void testEncodeAndDecode(String compressionType) throws IOException {
}
builder.addAllBatchedEntryDeletionIndexInfo(batchedEntryDeletionIndexInfos);
+ return builder;
+ }
+
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testEncodeAndDecode(String compressionType) throws IOException {
+ long ledgerId = 10000;
+ MLDataFormats.ManagedCursorInfo.Builder builder = generateManagedCursorInfo(ledgerId, 1000);
MetaStoreImpl metaStore;
if (INVALID_TYPE.equals(compressionType)) {
IllegalArgumentException compressionTypeEx = expectThrows(IllegalArgumentException.class, () -> {
- new MetaStoreImpl(null, null, null, compressionType);
+ new MetaStoreImpl(null, null, null, new MetadataCompressionConfig(compressionType));
});
assertEquals(compressionTypeEx.getMessage(),
"No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType."
+ compressionType);
return;
} else {
- metaStore = new MetaStoreImpl(null, null, null, compressionType);
+ metaStore = new MetaStoreImpl(null, null, null, new MetadataCompressionConfig(compressionType));
}
MLDataFormats.ManagedCursorInfo managedCursorInfo = builder.build();
@@ -93,4 +100,42 @@ public void testEncodeAndDecode(String compressionType) throws IOException {
MLDataFormats.ManagedCursorInfo info2 = metaStore.parseManagedCursorInfo(managedCursorInfo.toByteArray());
assertEquals(info1, info2);
}
+
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testCompressionThreshold(String compressionType) throws IOException {
+ int compressThreshold = 512;
+
+ long ledgerId = 10000;
+ // should not compress
+ MLDataFormats.ManagedCursorInfo smallInfo = generateManagedCursorInfo(ledgerId, 1).build();
+ assertTrue(smallInfo.getSerializedSize() < compressThreshold);
+
+ // should compress
+ MLDataFormats.ManagedCursorInfo bigInfo = generateManagedCursorInfo(ledgerId, 1000).build();
+ assertTrue(bigInfo.getSerializedSize() > compressThreshold);
+
+ MetaStoreImpl metaStore;
+ if (INVALID_TYPE.equals(compressionType)) {
+ IllegalArgumentException compressionTypeEx = expectThrows(IllegalArgumentException.class, () -> {
+ new MetaStoreImpl(null, null, null,
+ new MetadataCompressionConfig(compressionType, compressThreshold));
+ });
+ assertEquals(compressionTypeEx.getMessage(),
+ "No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType."
+ + compressionType);
+ return;
+ } else {
+ metaStore = new MetaStoreImpl(null, null, null,
+ new MetadataCompressionConfig(compressionType, compressThreshold));
+ }
+
+ byte[] compressionBytes = metaStore.compressCursorInfo(smallInfo);
+ // not compressed
+ assertEquals(compressionBytes.length, smallInfo.getSerializedSize());
+
+
+ byte[] compressionBigBytes = metaStore.compressCursorInfo(bigInfo);
+ // compressed
+ assertTrue(compressionBigBytes.length != smallInfo.getSerializedSize());
+ }
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java
index 8dc726c249efc..1b1b5534256f9 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java
@@ -48,6 +48,7 @@
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
@@ -95,6 +96,7 @@
import org.apache.bookkeeper.test.MockedBookKeeperTestCase;
import org.apache.pulsar.common.api.proto.CommandSubscribe;
import org.apache.pulsar.common.api.proto.IntRange;
+import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.common.util.collections.BitSetRecyclable;
import org.apache.pulsar.common.util.collections.LongPairRangeSet;
import org.apache.pulsar.metadata.api.MetadataStoreException;
@@ -1060,6 +1062,21 @@ void removingCursor() throws Exception {
Awaitility.await().until(() -> ledger.getNumberOfEntries() <= 2);
}
+ @Test(timeOut = 10000)
+ void testRemoveCursorFail() throws Exception {
+ String mlName = UUID.randomUUID().toString().replaceAll("-", "");
+ String cursorName = "c1";
+ ManagedLedger ledger = factory.open(mlName);
+ ledger.openCursor(cursorName);
+ metadataStore.setAlwaysFail(new MetadataStoreException("123"));
+ try {
+ ledger.deleteCursor(cursorName);
+ fail("expected delete cursor failure.");
+ } catch (Exception ex) {
+ assertTrue(FutureUtil.unwrapCompletionException(ex).getMessage().contains("123"));
+ }
+ }
+
@Test(timeOut = 20000)
void cursorPersistence() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger");
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java
index eac22d2469b17..512e90d17f5e8 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java
@@ -34,6 +34,8 @@
import java.util.concurrent.atomic.AtomicReference;
import lombok.Cleanup;
import org.apache.bookkeeper.client.BKException;
+import org.apache.bookkeeper.client.BookKeeper;
+import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.client.api.DigestType;
import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.CloseCallback;
@@ -48,6 +50,7 @@
import org.apache.bookkeeper.test.MockedBookKeeperTestCase;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore;
+import org.awaitility.Awaitility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
@@ -511,9 +514,10 @@ public void recoverAfterWriteError() throws Exception {
public void recoverLongTimeAfterMultipleWriteErrors() throws Exception {
ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("recoverLongTimeAfterMultipleWriteErrors");
ManagedCursor cursor = ledger.openCursor("c1");
+ LedgerHandle firstLedger = ledger.currentLedger;
- bkc.failAfter(0, BKException.Code.BookieHandleNotAvailableException);
- bkc.failAfter(1, BKException.Code.BookieHandleNotAvailableException);
+ bkc.addEntryFailAfter(0, BKException.Code.BookieHandleNotAvailableException);
+ bkc.addEntryFailAfter(1, BKException.Code.BookieHandleNotAvailableException);
CountDownLatch counter = new CountDownLatch(2);
AtomicReference ex = new AtomicReference<>();
@@ -540,6 +544,18 @@ public void addFailed(ManagedLedgerException exception, Object ctx) {
counter.await();
assertNull(ex.get());
+ Awaitility.await().untilAsserted(() -> {
+ try {
+ bkc.openLedger(firstLedger.getId(),
+ BookKeeper.DigestType.fromApiDigestType(ledger.getConfig().getDigestType()),
+ ledger.getConfig().getPassword());
+ fail("The expected behavior is that the first ledger will be deleted, but it still exists.");
+ } catch (Exception ledgerDeletedEx){
+                // Expected LedgerNotExistsEx: the first ledger will be deleted after the add-entry failure.
+ assertTrue(ledgerDeletedEx instanceof BKException.BKNoSuchLedgerExistsException);
+ }
+ });
+
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 2);
// Ensure that we are only creating one new ledger
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java
index d49d9ab3e2b6b..8695759c99f62 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java
@@ -20,12 +20,16 @@
import static org.testng.Assert.assertEquals;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.mledger.ManagedCursor;
import org.apache.bookkeeper.mledger.ManagedLedgerConfig;
import org.apache.bookkeeper.mledger.ManagedLedgerInfo;
import org.apache.bookkeeper.mledger.ManagedLedgerInfo.CursorInfo;
import org.apache.bookkeeper.mledger.ManagedLedgerInfo.MessageRangeInfo;
import org.apache.bookkeeper.test.MockedBookKeeperTestCase;
+import org.awaitility.Awaitility;
+import org.testng.Assert;
import org.testng.annotations.Test;
public class ManagedLedgerFactoryTest extends MockedBookKeeperTestCase {
@@ -71,4 +75,43 @@ public void testGetManagedLedgerInfoWithClose() throws Exception {
assertEquals(mri.to.entryId, 0);
}
+ /**
+ * see: https://github.com/apache/pulsar/pull/18688
+ */
+ @Test
+ public void testConcurrentCloseLedgerAndSwitchLedgerForReproduceIssue() throws Exception {
+ String managedLedgerName = "lg_" + UUID.randomUUID().toString().replaceAll("-", "_");
+
+ ManagedLedgerConfig config = new ManagedLedgerConfig();
+ config.setThrottleMarkDelete(1);
+ config.setMaximumRolloverTime(Integer.MAX_VALUE, TimeUnit.SECONDS);
+ config.setMaxEntriesPerLedger(5);
+
+ // create managedLedger once and close it.
+ ManagedLedgerImpl managedLedger1 = (ManagedLedgerImpl) factory.open(managedLedgerName, config);
+ waitManagedLedgerStateEquals(managedLedger1, ManagedLedgerImpl.State.LedgerOpened);
+ managedLedger1.close();
+
+ // create managedLedger the second time.
+ ManagedLedgerImpl managedLedger2 = (ManagedLedgerImpl) factory.open(managedLedgerName, config);
+ waitManagedLedgerStateEquals(managedLedger2, ManagedLedgerImpl.State.LedgerOpened);
+
+        // Mock that the create-ledger task completes now; it will change the state to another value which is not Closed.
+ // Close managedLedger1 the second time.
+ managedLedger1.createComplete(1, null, null);
+ managedLedger1.close();
+
+ // Verify managedLedger2 is still there.
+ Assert.assertFalse(factory.ledgers.isEmpty());
+ Assert.assertEquals(factory.ledgers.get(managedLedger2.getName()).join(), managedLedger2);
+
+ // cleanup.
+ managedLedger2.close();
+ }
+
+ private void waitManagedLedgerStateEquals(ManagedLedgerImpl managedLedger, ManagedLedgerImpl.State expectedStat){
+ Awaitility.await().untilAsserted(() ->
+ Assert.assertTrue(managedLedger.getState() == expectedStat));
+ }
+
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java
index 7ddf6541c9a39..6e1f447225e53 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java
@@ -26,6 +26,7 @@
import java.util.Map;
import java.util.UUID;
import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.mledger.MetadataCompressionConfig;
import org.apache.bookkeeper.mledger.offload.OffloadUtils;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.commons.lang3.RandomUtils;
@@ -33,6 +34,8 @@
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
/**
* ManagedLedgerInfo metadata test.
@@ -53,11 +56,9 @@ private Object[][] compressionTypeProvider() {
};
}
- @Test(dataProvider = "compressionTypeProvider")
- public void testEncodeAndDecode(String compressionType) throws IOException {
- long ledgerId = 10000;
+ private MLDataFormats.ManagedLedgerInfo.Builder generateManagedLedgerInfo(long ledgerId, int ledgerInfoNumber) {
List ledgerInfoList = new ArrayList<>();
- for (int i = 0; i < 100; i++) {
+ for (int i = 0; i < ledgerInfoNumber; i++) {
MLDataFormats.ManagedLedgerInfo.LedgerInfo.Builder builder = MLDataFormats.ManagedLedgerInfo.LedgerInfo.newBuilder();
builder.setLedgerId(ledgerId);
builder.setEntries(RandomUtils.nextInt());
@@ -84,13 +85,18 @@ public void testEncodeAndDecode(String compressionType) throws IOException {
ledgerId ++;
}
- MLDataFormats.ManagedLedgerInfo managedLedgerInfo = MLDataFormats.ManagedLedgerInfo.newBuilder()
- .addAllLedgerInfo(ledgerInfoList)
- .build();
+ return MLDataFormats.ManagedLedgerInfo.newBuilder()
+ .addAllLedgerInfo(ledgerInfoList);
+ }
+
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testEncodeAndDecode(String compressionType) throws IOException {
+ long ledgerId = 10000;
+ MLDataFormats.ManagedLedgerInfo managedLedgerInfo = generateManagedLedgerInfo(ledgerId,100).build();
MetaStoreImpl metaStore;
try {
- metaStore = new MetaStoreImpl(null, null, compressionType, null);
+ metaStore = new MetaStoreImpl(null, null, new MetadataCompressionConfig(compressionType), null);
if ("INVALID_TYPE".equals(compressionType)) {
Assert.fail("The managedLedgerInfo compression type is invalid, should fail.");
}
@@ -126,4 +132,45 @@ public void testParseEmptyData() throws InvalidProtocolBufferException {
Assert.assertEquals(managedLedgerInfo.toString(), "");
}
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testCompressionThreshold(String compressionType) {
+ long ledgerId = 10000;
+ int compressThreshold = 512;
+
+ // should not compress
+ MLDataFormats.ManagedLedgerInfo smallInfo = generateManagedLedgerInfo(ledgerId, 0).build();
+ assertTrue(smallInfo.getSerializedSize() < compressThreshold);
+
+ // should compress
+ MLDataFormats.ManagedLedgerInfo bigInfo = generateManagedLedgerInfo(ledgerId, 1000).build();
+ assertTrue(bigInfo.getSerializedSize() > compressThreshold);
+
+ MLDataFormats.ManagedLedgerInfo managedLedgerInfo = generateManagedLedgerInfo(ledgerId,100).build();
+
+ MetaStoreImpl metaStore;
+ try {
+ MetadataCompressionConfig metadataCompressionConfig =
+ new MetadataCompressionConfig(compressionType, compressThreshold);
+ metaStore = new MetaStoreImpl(null, null, metadataCompressionConfig, null);
+ if ("INVALID_TYPE".equals(compressionType)) {
+ Assert.fail("The managedLedgerInfo compression type is invalid, should fail.");
+ }
+ } catch (Exception e) {
+ if ("INVALID_TYPE".equals(compressionType)) {
+ Assert.assertEquals(e.getClass(), IllegalArgumentException.class);
+ Assert.assertEquals(
+ "No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType."
+ + compressionType, e.getMessage());
+ return;
+ } else {
+ throw e;
+ }
+ }
+
+ byte[] compressionBytes = metaStore.compressLedgerInfo(smallInfo);
+ assertEquals(compressionBytes.length, smallInfo.getSerializedSize());
+
+ byte[] compressionBytesBig = metaStore.compressLedgerInfo(bigInfo);
+ assertTrue(compressionBytesBig.length !=smallInfo.getSerializedSize());
+ }
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java
index 3b011fe8d56f8..70ddbb9998fd8 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java
@@ -48,6 +48,7 @@
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
+import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -75,7 +76,9 @@
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import lombok.Cleanup;
+import lombok.Data;
import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.client.AsyncCallback;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;
@@ -85,6 +88,7 @@
import org.apache.bookkeeper.client.PulsarMockBookKeeper;
import org.apache.bookkeeper.client.PulsarMockLedgerHandle;
import org.apache.bookkeeper.client.api.LedgerEntries;
+import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.ReadHandle;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.mledger.AsyncCallbacks;
@@ -125,6 +129,7 @@
import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition;
import org.apache.pulsar.common.policies.data.EnsemblePlacementPolicyConfig;
import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl;
+import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.Stat;
import org.apache.pulsar.metadata.api.extended.SessionEvent;
@@ -144,6 +149,117 @@ public Object[][] checkOwnershipFlagProvider() {
return new Object[][] { { Boolean.TRUE }, { Boolean.FALSE } };
}
+ private void makeAddEntryTimeout(ManagedLedgerImpl ml, AtomicBoolean addEntryFinished) throws Exception {
+ LedgerHandle currentLedger = ml.currentLedger;
+ final LedgerHandle spyLedgerHandle = spy(currentLedger);
+ doAnswer(invocation -> {
+ ByteBuf bs = (ByteBuf) invocation.getArguments()[0];
+ AddCallback addCallback = (AddCallback) invocation.getArguments()[1];
+ Object originalContext = invocation.getArguments()[2];
+ currentLedger.asyncAddEntry(bs, (rc, lh, entryId, ctx) -> {
+ addEntryFinished.set(true);
+ addCallback.addComplete(BKException.Code.TimeoutException, spyLedgerHandle, -1, ctx);
+ }, originalContext);
+ return null;
+ }).when(spyLedgerHandle).asyncAddEntry(any(ByteBuf.class), any(AddCallback.class), any());
+ ml.currentLedger = spyLedgerHandle;
+ }
+
+ @Data
+ private static class DeleteLedgerInfo{
+ volatile boolean hasCalled;
+ volatile CompletableFuture future = new CompletableFuture<>();
+ }
+
+ private DeleteLedgerInfo makeDelayIfDoLedgerDelete(LedgerHandle ledger, final AtomicBoolean signal,
+ BookKeeper spyBookKeeper) {
+ DeleteLedgerInfo deleteLedgerInfo = new DeleteLedgerInfo();
+ doAnswer(invocation -> {
+ long ledgerId = (long) invocation.getArguments()[0];
+ AsyncCallback.DeleteCallback originalCb = (AsyncCallback.DeleteCallback) invocation.getArguments()[1];
+ AsyncCallback.DeleteCallback cb = (rc, ctx) -> {
+ if (deleteLedgerInfo.hasCalled) {
+ deleteLedgerInfo.future.complete(null);
+ }
+ originalCb.deleteComplete(rc, ctx);
+ };
+ Object ctx = invocation.getArguments()[2];
+ if (ledgerId != ledger.getId()){
+ bkc.asyncDeleteLedger(ledgerId, originalCb, ctx);
+ } else {
+ deleteLedgerInfo.hasCalled = true;
+ new Thread(() -> {
+ Awaitility.await().atMost(Duration.ofSeconds(60)).until(signal::get);
+ bkc.asyncDeleteLedger(ledgerId, cb, ctx);
+ }).start();
+ }
+ return null;
+ }).when(spyBookKeeper).asyncDeleteLedger(any(long.class), any(AsyncCallback.DeleteCallback.class), any());
+ return deleteLedgerInfo;
+ }
+
+ /***
+ * This test simulates the following problems that can occur when ZK connections are unstable:
+ * - add entry timeout
+ * - write ZK fail when update ledger info of ML
+ * and verifies that ledger info of ML is still correct when the above problems occur.
+ */
+ @Test
+ public void testLedgerInfoMetaCorrectIfAddEntryTimeOut() throws Exception {
+ String mlName = "testLedgerInfoMetaCorrectIfAddEntryTimeOut";
+ BookKeeper spyBookKeeper = spy(bkc);
+ ManagedLedgerFactoryImpl factory = new ManagedLedgerFactoryImpl(metadataStore, spyBookKeeper);
+ ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName);
+
+        // Make the add-entry operation time out (the data write was actually successful).
+ AtomicBoolean addEntryFinished = new AtomicBoolean(false);
+ makeAddEntryTimeout(ml, addEntryFinished);
+
+        // Make the update operation of the ledger info fail when switching ledgers.
+ metadataStore.failConditional(new MetadataStoreException.BadVersionException(""), (opType, path) -> {
+ if (opType == FaultInjectionMetadataStore.OperationType.PUT && addEntryFinished.get()
+ && "/managed-ledgers/testLedgerInfoMetaCorrectIfAddEntryTimeOut".equals(path)) {
+ return true;
+ }
+ return false;
+ });
+
+        // Delay the ledger deletion if delete is called.
+ AtomicBoolean deleteLedgerDelaySignal = new AtomicBoolean(false);
+ DeleteLedgerInfo deleteLedgerInfo =
+ makeDelayIfDoLedgerDelete(ml.currentLedger, deleteLedgerDelaySignal, spyBookKeeper);
+
+ // Add one entry.
+ // - it will fail and trigger ledger switch(we mocked the error).
+ // - ledger switch will also fail(we mocked the error).
+ try {
+ ml.addEntry("1".getBytes(Charset.defaultCharset()));
+ fail("Expected the operation of add entry will fail by timeout or ledger fenced.");
+ } catch (Exception e){
+ // expected ex.
+ }
+
+ // Reopen ML.
+ try {
+ ml.close();
+ fail("Expected the operation of ml close will fail by fenced state.");
+ } catch (Exception e){
+ // expected ex.
+ }
+ ManagedLedgerImpl mlReopened = (ManagedLedgerImpl) factory.open(mlName);
+ deleteLedgerDelaySignal.set(true);
+ if (deleteLedgerInfo.hasCalled){
+ deleteLedgerInfo.future.join();
+ }
+ mlReopened.close();
+
+        // Verify: all ledgers in the ledger info are usable.
+ for (long ledgerId : mlReopened.getLedgersInfo().keySet()){
+ LedgerHandle lh = bkc.openLedger(ledgerId, ml.digestType, ml.getConfig().getPassword());
+ lh.close();
+ }
+ }
+
@Test
public void managedLedgerApi() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger");
@@ -3744,12 +3860,26 @@ public void testInactiveLedgerRollOver() throws Exception {
ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("rollover_inactive", config);
ManagedCursor cursor = ledger.openCursor("c1");
+ List ledgerIds = new ArrayList<>();
+
int totalAddEntries = 5;
for (int i = 0; i < totalAddEntries; i++) {
String content = "entry"; // 5 bytes
ledger.checkInactiveLedgerAndRollOver();
ledger.addEntry(content.getBytes());
Thread.sleep(inactiveLedgerRollOverTimeMs * 5);
+
+ ledgerIds.add(ledger.currentLedger.getId());
+ }
+
+ Map ledgerMap = bkc.getLedgerMap();
+ // skip check last ledger, it should be open
+ for (int i = 0; i < ledgerIds.size() - 1; i++) {
+ long ledgerId = ledgerIds.get(i);
+ LedgerMetadata ledgerMetadata = ledgerMap.get(ledgerId).getLedgerMetadata();
+ if (ledgerMetadata != null) {
+ assertTrue(ledgerMetadata.isClosed());
+ }
}
List ledgers = ledger.getLedgersInfoAsList();
@@ -3855,4 +3985,29 @@ public void testGetEstimatedBacklogSize() throws Exception {
Assert.assertEquals(ledger.getEstimatedBacklogSize(((PositionImpl) positions.get(9)).getNext()), 0);
ledger.close();
}
+
+ @Test
+ public void testDeleteCursorTwice() throws Exception {
+ ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open("ml");
+ String cursorName = "cursor_1";
+ ml.openCursor(cursorName);
+ syncRemoveCursor(ml, cursorName);
+ syncRemoveCursor(ml, cursorName);
+ }
+
+ private void syncRemoveCursor(ManagedLedgerImpl ml, String cursorName){
+ CompletableFuture future = new CompletableFuture<>();
+ ml.getStore().asyncRemoveCursor(ml.name, cursorName, new MetaStoreCallback() {
+ @Override
+ public void operationComplete(Void result, Stat stat) {
+ future.complete(null);
+ }
+
+ @Override
+ public void operationFailed(MetaStoreException e) {
+ future.completeExceptionally(FutureUtil.unwrapCompletionException(e));
+ }
+ });
+ future.join();
+ }
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java
index e56d4488a3d5c..1ad3f5f8de631 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java
@@ -824,5 +824,20 @@ void deleteNonDurableCursorWithName() throws Exception {
assertEquals(Iterables.size(ledger.getCursors()), 0);
}
+ @Test
+ public void testMessagesConsumedCounterInitializedCorrect() throws Exception {
+ ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testMessagesConsumedCounterInitializedCorrect",
+ new ManagedLedgerConfig().setRetentionTime(1, TimeUnit.HOURS).setRetentionSizeInMB(1));
+ Position position = ledger.addEntry("1".getBytes(Encoding));
+ NonDurableCursorImpl cursor = (NonDurableCursorImpl) ledger.newNonDurableCursor(PositionImpl.EARLIEST);
+ cursor.delete(position);
+ assertEquals(cursor.getMessagesConsumedCounter(), 1);
+ assertTrue(cursor.getMessagesConsumedCounter() <= ledger.getEntriesAddedCounter());
+ // cleanup.
+ cursor.close();
+ ledger.close();
+ }
+
+
private static final Logger log = LoggerFactory.getLogger(NonDurableCursorTest.class);
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java b/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java
index c889f94b60801..80bb6256591bc 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java
@@ -207,6 +207,8 @@ public void tearDown() throws Exception {
}
// stop zookeeper service
try {
+ // cleanup for metrics.
+ metadataStore.close();
stopZKCluster();
} catch (Exception e) {
LOG.error("Got Exception while trying to stop ZKCluster", e);
diff --git a/pip/README.md b/pip/README.md
new file mode 100644
index 0000000000000..3ed9a1d34cd1d
--- /dev/null
+++ b/pip/README.md
@@ -0,0 +1,97 @@
+# Pulsar Improvement Proposal (PIP)
+
+## What is a PIP?
+
+The PIP is a "Pulsar Improvement Proposal" and it's the mechanism used to propose changes to the Apache Pulsar codebases.
+
+The changes might be in terms of new features, large code refactoring, changes to APIs.
+
+In practical terms, the PIP defines a process in which developers can submit a design doc, receive feedback and get the "go ahead" to execute.
+
+### What is the goal of a PIP?
+
+There are several goals for the PIP process:
+
+1. Ensure community technical discussion of major changes to the Apache Pulsar codebase.
+
+2. Provide clear and thorough design documentation of the proposed changes. Make sure every Pulsar developer will have enough context to effectively perform a code review of the Pull Requests.
+
+3. Use the PIP document to serve as the baseline on which to create the documentation for the new feature.
+
+4. Apply greater scrutiny to changes affecting the public APIs (as defined below) to reduce the chances of introducing breaking changes or APIs that do not express an ideal semantic.
+
+It is not a goal of the PIP process to add undue process or slow down development.
+
+### When is a PIP required?
+
+* Any new feature for Pulsar brokers or client
+* Any change to the public APIs (Client APIs, REST APIs, Plugin APIs)
+* Any change to the wire protocol APIs
+* Any change to the API of Pulsar CLI tools (eg: new options)
+* Any change to the semantic of existing functionality, even when current behavior is incorrect.
+* Any large code change that will touch multiple components
+* Any changes to the metrics (metrics endpoint, topic stats, topics internal stats, broker stats, etc.)
+* Any change to the configuration
+
+### When is a PIP *not* required?
+
+* Bug-fixes
+* Simple enhancements that won't affect the APIs or the semantic
+* Small documentation changes
+* Small website changes
+* Build scripts changes (except: a complete rewrite)
+
+### Who can create a PIP?
+
+Any person willing to contribute to the Apache Pulsar project is welcome to create a PIP.
+
+## How does the PIP process work?
+
+A PIP proposal can be in these states:
+1. **DRAFT**: (Optional) This might be used for contributors to collaborate and to seek feedback on an incomplete version of the proposal.
+
+2. **DISCUSSION**: The proposal has been submitted to the community for feedback and approval.
+
+3. **ACCEPTED**: The proposal has been accepted by the Pulsar project.
+
+4. **REJECTED**: The proposal has not been accepted by the Pulsar project.
+
+5. **IMPLEMENTED**: The implementation of the proposed changes has been completed and everything has been merged.
+
+6. **RELEASED**: The proposed changes have been included in an official
+ Apache Pulsar release.
+
+
+The process works in the following way:
+
+1. Fork https://github.com/apache/pulsar repository (Using the fork button on GitHub).
+2. Clone the repository, and on it, copy the file `pip/TEMPLATE.md` and name it `pip-xxx.md`. The number `xxx` should be the next sequential number after the last contributed PIP. You can view the list of contributed PIPs (at any status) as a list of Pull Requests having a title starting with `[pip][design] PIP-`. Use the link [here](https://github.com/apache/pulsar/pulls?q=is%3Apr+title%3A%22%5Bpip%5D%5Bdesign%5D+PIP-%22) as shortcut.
+3. Write the proposal following the section outlined by the template and the explanation for each section in the comment it contains (you can delete the comment once done).
+ * If you need diagrams, avoid attaching large files. You can use [MermaidJS](https://mermaid.js.org/) as simple language to describe many types of diagrams.
+4. Create GitHub Pull request (PR). The PR title should be `[pip][design] PIP-xxx: {title}`, where the `xxx` match the number given in previous step (file-name). Replace `{title}` with a short title to your proposal.
+5. The author(s) will email the dev@pulsar.apache.org mailing list to kick off a discussion, using subject prefix `[DISCUSS] PIP-xxx: {PIP TITLE}`. The discussion will happen in a broader context either on the mailing list or as general comments on the PR. Many of the discussion items will be on particular aspects of the proposal, hence they should be added as comments in the PR to specific lines in the proposal file.
+6. Update the file with a link to the discussion on the mailing list. You can obtain it from [Apache Pony Mail](https://lists.apache.org/list.html?dev@pulsar.apache.org).
+7. Based on the discussion and feedback, some changes might be applied by authors to the text of the proposal. They will be applied as extra commits, making it easier to track the changes.
+8. Once some consensus is reached, there will be a vote to formally approve the proposal. The vote will be held on the dev@pulsar.apache.org mailing list, by
+ sending a message using subject `[VOTE] PIP-xxx: {PIP TITLE}`.
+ Make sure to update the PIP with a link to the vote. You can obtain it from [Apache Pony Mail](https://lists.apache.org/list.html?dev@pulsar.apache.org).
+ Everyone is welcome to vote on the proposal, though only the vote of the PMC members will be considered binding.
+ It is required to have a lazy majority of at least 3 binding +1s votes.
+ The vote should stay open for at least 48 hours.
+9. When the vote is closed, if the outcome is positive, ask a PMC member (using voting thread on mailing list) to merge the PR.
+10. If the outcome is negative, please close the PR (with a small comment that the close is a result of a vote).
+
+All the future implementation Pull Requests that will be created, should always reference the PIP-XXX in the commit log message and the PR title.
+It is advised to create a master GitHub issue to formulate the execution plan and track its progress.
+
+## List of PIPs
+
+### Historical PIPs
+You can view the list of PIPs previously managed by GitHub wiki or GitHub issues [here](https://github.com/apache/pulsar/wiki#pulsar-improvement-proposals)
+
+### List of PIPs
+1. You can view all PIPs (besides the historical ones) as the list of Pull Requests having title starting with `[pip][design] PIP-`. Here is the [link](https://github.com/apache/pulsar/pulls?q=is%3Apr+title%3A%22%5Bpip%5D%5Bdesign%5D+PIP-%22) for it.
+ - Merged PR means the PIP was accepted.
+ - Closed PR means the PIP was rejected.
+ - Open PR means the PIP was submitted and is in the process of discussion.
+2. You can also take a look at the file in the `pip` folder. Each one is an approved PIP.
\ No newline at end of file
diff --git a/pip/TEMPLATE.md b/pip/TEMPLATE.md
new file mode 100644
index 0000000000000..549916c7eb929
--- /dev/null
+++ b/pip/TEMPLATE.md
@@ -0,0 +1,163 @@
+
+
+# Background knowledge
+
+
+
+# Motivation
+
+
+
+# Goals
+
+## In Scope
+
+
+
+## Out of Scope
+
+
+
+
+# High Level Design
+
+
+
+# Detailed Design
+
+## Design & Implementation Details
+
+
+
+## Public-facing Changes
+
+
+
+### Public API
+
+
+### Binary protocol
+
+### Configuration
+
+### CLI
+
+### Metrics
+
+
+
+
+# Monitoring
+
+
+
+# Security Considerations
+
+
+# Backward & Forward Compatibility
+
+## Revert
+
+
+
+## Upgrade
+
+
+
+# Alternatives
+
+
+
+# General Notes
+
+# Links
+
+
+* Mailing List discussion thread:
+* Mailing List voting thread:
diff --git a/pom.xml b/pom.xml
index a2e14fb0c6b3e..1c7cbb5de5516 100644
--- a/pom.xml
+++ b/pom.xml
@@ -32,7 +32,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOTPulsarPulsar is a distributed pub-sub messaging platform with a very
@@ -82,7 +82,7 @@ flexible messaging model and an intuitive client API.${maven.compiler.target}8
- 2.10.1
+ 3.2.0**/Test*.java,**/*Test.java,**/*Tests.java,**/*TestCase.java
@@ -92,8 +92,14 @@ flexible messaging model and an intuitive client API.
UTF-8UTF-8
- ${maven.build.timestamp}
+ 2023-05-03T02:53:27Ztrue
+
+
+
+
--add-opens java.base/jdk.internal.loader=ALL-UNNAMED
@@ -103,6 +109,7 @@ flexible messaging model and an intuitive client API.
--add-opens java.base/sun.net=ALL-UNNAMED
--add-opens java.management/sun.management=ALL-UNNAMED
--add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED
+ --add-opens java.base/jdk.internal.platform=ALL-UNNAMED
true4
@@ -126,33 +133,33 @@ flexible messaging model and an intuitive client API.
1.21
- 4.15.3
- 3.8.0
+ 4.16.1
+ 3.8.11.5.01.10.01.1.8.44.1.12.15.1.0
- 4.1.86.Final
- 0.0.16.Final
- 9.4.48.v20220622
+ 4.1.93.Final
+ 0.0.21.Final
+ 9.4.51.v202302172.5.22.341.10.500.16.0
- 3.9.8
- 6.29.4.1
+ 4.3.8
+ 7.9.21.7.324.42.18.01.691.0.61.0.2.3
- 2.13.4.20221013
- 0.9.11
+ 2.14.2
+ 0.10.21.6.28.37
- 0.40.2
+ 0.42.1true0.5.03.19.6
@@ -167,33 +174,35 @@ flexible messaging model and an intuitive client API.
2.2.03.11.24.4.20
- 2.8.2
+ 3.4.05.5.31.12.2621.10.22.10.102.5.05.1.0
- 3.36.0.3
+ 3.42.0.08.0.1142.5.1
- 0.3.2-patch11
+ 0.4.62.7.50.4.4-hotfix1
- 3.3.3
- 2.4.7
+ 3.3.5
+ 2.4.101.2.48.5.2
- 363
+ 3681.9.7.Final42.5.08.0.30
+
+ 1.15.16.Final0.11.10.28.02.10.2
- 3.3.4
- 2.4.15
- 31.0.1-jre
+ 3.3.5
+ 2.4.16
+ 32.0.0-jre1.00.16.16.2.8
@@ -214,7 +223,7 @@ flexible messaging model and an intuitive client API.
0.9.12.1.03.18.1
- 1.18.24
+ 1.18.261.3.22.3.11.2.0
@@ -222,19 +231,19 @@ flexible messaging model and an intuitive client API.
2.3.32.0.25.12.1
- 12.0.1
+ 18.0.04.9.32.8.0
- 1.4.32
+ 1.8.201.09.1.6
- 5.3.20
+ 5.3.274.5.134.4.15
- 0.5.11
- 1.32
+ 0.7.5
+ 2.01.10.125.3.33.4.3
@@ -242,20 +251,20 @@ flexible messaging model and an intuitive client API.
2.0.6
- 1.17.6
+ 1.18.32.2
- 3.2.13
+ 3.3.01.1.1
- 7.7.0
+ 7.7.13.12.43.25.0-GA1.5.03.14.2.01.2.22
- 1.5.3
+ 1.5.45.4.02.33.2
@@ -264,22 +273,21 @@ flexible messaging model and an intuitive client API.
3.0.04.11.0
- 3.1.0
+ 3.3.0
-
- 3.0.0-M3
- 3.4.2
- 3.10.1
- 3.4.0
+ 3.1.0
+ 3.5.0
+ 3.11.0
+ 3.5.02.3.03.4.13.1.01.1.0
- 1.3.4
+ 1.5.03.1.2
- 4.0.2
- 3.4.3
+ 4.9.10
+ 3.5.31.7.00.8.84.7.3.0
@@ -289,8 +297,8 @@ flexible messaging model and an intuitive client API.
0.1.41.30.4
- 7.4.4
- 0.9.15
+ 8.2.1
+ 0.9.441.6.16.4.0
@@ -876,6 +884,24 @@ flexible messaging model and an intuitive client API.
${caffeine.version}
+
+ org.bouncycastle
+ bcpkix-jdk15on
+ ${bouncycastle.version}
+
+
+
+ com.cronutils
+ cron-utils
+ ${cron-utils.version}
+
+
+ org.glassfish
+ javax.el
+
+
+
+
com.yahoo.athenzathenz-zts-java-client-core
@@ -888,6 +914,12 @@ flexible messaging model and an intuitive client API.
${athenz.version}
+
+ com.yahoo.athenz
+ athenz-cert-refresher
+ ${athenz.version}
+
+
com.github.zafarkhajajava-semver
@@ -1456,7 +1488,6 @@ flexible messaging model and an intuitive client API.
UTF-8truetrue
- truefalse
@@ -1505,10 +1536,17 @@ flexible messaging model and an intuitive client API.
listener
- org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier,org.apache.pulsar.tests.MockitoCleanupListener,org.apache.pulsar.tests.FastThreadLocalCleanupListener,org.apache.pulsar.tests.ThreadLeakDetectorListener,org.apache.pulsar.tests.SingletonCleanerListener
+ org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.JacocoDumpListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier,org.apache.pulsar.tests.MockitoCleanupListener,org.apache.pulsar.tests.FastThreadLocalCleanupListener,org.apache.pulsar.tests.ThreadLeakDetectorListener,org.apache.pulsar.tests.SingletonCleanerListener
+
+
+ org.apache.maven.surefire
+ surefire-testng
+ ${surefire.version}
+
+
@@ -1533,6 +1571,31 @@ flexible messaging model and an intuitive client API.
+
+ pl.project13.maven
+ git-commit-id-plugin
+ ${git-commit-id-plugin.version}
+
+
+ git-info
+
+ revision
+
+
+
+
+ true
+ true
+ git
+ false
+ false
+ false
+
+ true
+
+
+
+
com.mycilalicense-maven-plugin
@@ -1940,8 +2003,8 @@ flexible messaging model and an intuitive client API.
88
-
-
+
+
@@ -1969,6 +2032,7 @@ flexible messaging model and an intuitive client API.
${project.build.directory}/jacoco_${maven.build.timestamp}_${surefire.forkNumber}.exectrue
+ trueorg.apache.pulsar.*org.apache.bookkeeper.mledger.*
@@ -2132,6 +2196,7 @@ flexible messaging model and an intuitive client API.
pulsar-broker-auth-athenzpulsar-client-auth-athenzpulsar-sql
+ pulsar-broker-auth-oidcpulsar-broker-auth-saslpulsar-client-auth-saslpulsar-config-validation
@@ -2147,9 +2212,6 @@ flexible messaging model and an intuitive client API.
pulsar-io
-
- kafka-connect-avro-converter-shaded
-
bouncy-castle
@@ -2193,6 +2255,7 @@ flexible messaging model and an intuitive client API.
pulsar-websocketpulsar-proxypulsar-testclient
+ pulsar-broker-auth-oidcpulsar-broker-auth-saslpulsar-client-auth-saslpulsar-config-validation
@@ -2400,6 +2463,20 @@ flexible messaging model and an intuitive client API.
+
+ pulsar-io-elastic-tests
+
+ pulsar-io
+
+
+
+
+ pulsar-io-kafka-connect-tests
+
+ pulsar-io
+
+
+
pulsar-sql-tests
@@ -2433,5 +2510,4 @@ flexible messaging model and an intuitive client API.
-
diff --git a/pulsar-broker-auth-athenz/pom.xml b/pulsar-broker-auth-athenz/pom.xml
index 6d2bc54a25049..9c39e07b620ce 100644
--- a/pulsar-broker-auth-athenz/pom.xml
+++ b/pulsar-broker-auth-athenz/pom.xml
@@ -26,7 +26,7 @@
org.apache.pulsarpulsar
- 2.12.0-SNAPSHOT
+ 3.1.0-SNAPSHOTpulsar-broker-auth-athenz
diff --git a/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java b/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java
index 2e062b87a8325..652a922b9a5ad 100644
--- a/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java
+++ b/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java
@@ -43,6 +43,15 @@ public class AuthenticationProviderAthenz implements AuthenticationProvider {
private List domainNameList = null;
private int allowedOffset = 30;
+ public enum ErrorCode {
+ UNKNOWN,
+ NO_CLIENT,
+ NO_TOKEN,
+ NO_PUBLIC_KEY,
+ DOMAIN_MISMATCH,
+ INVALID_TOKEN,
+ }
+
@Override
public void initialize(ServiceConfiguration config) throws IOException {
String domainNames;
@@ -81,11 +90,13 @@ public String getAuthMethodName() {
public String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
SocketAddress clientAddress;
String roleToken;
+ ErrorCode errorCode = ErrorCode.UNKNOWN;
try {
if (authData.hasDataFromPeer()) {
clientAddress = authData.getPeerAddress();
} else {
+ errorCode = ErrorCode.NO_CLIENT;
throw new AuthenticationException("Authentication data source does not have a client address");
}
@@ -94,13 +105,16 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
} else if (authData.hasDataFromHttp()) {
roleToken = authData.getHttpHeader(AuthZpeClient.ZPE_TOKEN_HDR);
} else {
+ errorCode = ErrorCode.NO_TOKEN;
throw new AuthenticationException("Authentication data source does not have a role token");
}
if (roleToken == null) {
+ errorCode = ErrorCode.NO_TOKEN;
throw new AuthenticationException("Athenz token is null, can't authenticate");
}
if (roleToken.isEmpty()) {
+ errorCode = ErrorCode.NO_TOKEN;
throw new AuthenticationException("Athenz RoleToken is empty, Server is Using Athenz Authentication");
}
if (log.isDebugEnabled()) {
@@ -110,6 +124,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
RoleToken token = new RoleToken(roleToken);
if (!domainNameList.contains(token.getDomain())) {
+ errorCode = ErrorCode.DOMAIN_MISMATCH;
throw new AuthenticationException(
String.format("Athenz RoleToken Domain mismatch, Expected: %s, Found: %s",
domainNameList.toString(), token.getDomain()));
@@ -120,6 +135,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
PublicKey ztsPublicKey = AuthZpeClient.getZtsPublicKey(token.getKeyId());
if (ztsPublicKey == null) {
+ errorCode = ErrorCode.NO_PUBLIC_KEY;
throw new AuthenticationException("Unable to retrieve ZTS Public Key");
}
@@ -128,13 +144,13 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
return token.getPrincipal();
} else {
+ errorCode = ErrorCode.INVALID_TOKEN;
throw new AuthenticationException(
String.format("Athenz Role Token Not Authenticated from Client: %s", clientAddress));
}
}
} catch (AuthenticationException exception) {
- AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(),
- exception.getMessage());
+ incrementFailureMetric(errorCode);
throw exception;
}
}
diff --git a/pulsar-broker-auth-oidc/pom.xml b/pulsar-broker-auth-oidc/pom.xml
new file mode 100644
index 0000000000000..ca2b623d96eae
--- /dev/null
+++ b/pulsar-broker-auth-oidc/pom.xml
@@ -0,0 +1,191 @@
+
+
+
+ 4.0.0
+
+ org.apache.pulsar
+ pulsar
+ 3.1.0-SNAPSHOT
+
+
+ pulsar-broker-auth-oidc
+ jar
+ Open ID Connect authentication plugin for broker
+
+
+ 0.11.5
+
+
+
+
+
+ ${project.groupId}
+ pulsar-broker-common
+ ${project.version}
+
+
+ io.grpc
+ *
+
+
+
+
+
+ com.auth0
+ java-jwt
+ 4.3.0
+
+
+
+ com.auth0
+ jwks-rsa
+ 0.22.0
+
+
+
+ com.github.ben-manes.caffeine
+ caffeine
+
+
+
+ org.asynchttpclient
+ async-http-client
+
+
+
+ io.kubernetes
+ client-java
+ ${kubernetesclient.version}
+
+
+
+ io.prometheus
+ simpleclient_httpserver
+
+
+ bcpkix-jdk18on
+ org.bouncycastle
+
+
+ bcutil-jdk18on
+ org.bouncycastle
+
+
+ bcprov-jdk18on
+ org.bouncycastle
+
+
+
+
+
+ io.jsonwebtoken
+ jjwt-api
+ ${jsonwebtoken.version}
+ test
+
+
+ io.jsonwebtoken
+ jjwt-impl
+ ${jsonwebtoken.version}
+ test
+
+
+
+ com.github.tomakehurst
+ wiremock-jre8
+ ${wiremock.version}
+ test
+
+
+
+
+
+
+
+ test-jar-dependencies
+
+
+ maven.test.skip
+ !true
+
+
+
+
+ ${project.groupId}
+ pulsar-broker
+ ${project.version}
+ test
+ test-jar
+
+
+
+
+
+
+
+
+
+ org.gaul
+ modernizer-maven-plugin
+
+ true
+ 8
+
+
+
+ modernizer
+ verify
+
+ modernizer
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+
+
+ checkstyle
+ verify
+
+ check
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+
+ src/test/java/resources/fakeKubeConfig.yaml
+ ${project.basedir}/target/kubeconfig.yaml
+
+
+
+
+
+
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationExceptionCode.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationExceptionCode.java
new file mode 100644
index 0000000000000..5f89f5f1370f1
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationExceptionCode.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+/**
+ * Enum used to classify the types of exceptions encountered
+ * when attempting JWT verification.
+ */
+public enum AuthenticationExceptionCode {
+ UNSUPPORTED_ISSUER,
+ UNSUPPORTED_ALGORITHM,
+ ISSUER_MISMATCH,
+ ALGORITHM_MISMATCH,
+ INVALID_PUBLIC_KEY,
+ ERROR_RETRIEVING_PROVIDER_METADATA,
+ ERROR_RETRIEVING_PUBLIC_KEY,
+ ERROR_DECODING_JWT,
+ ERROR_VERIFYING_JWT,
+ ERROR_VERIFYING_JWT_SIGNATURE,
+ INVALID_JWT_CLAIM,
+ EXPIRED_JWT,
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java
new file mode 100644
index 0000000000000..2078666a08dd9
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsBoolean;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsInt;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsSet;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsString;
+import com.auth0.jwk.InvalidPublicKeyException;
+import com.auth0.jwk.Jwk;
+import com.auth0.jwt.JWT;
+import com.auth0.jwt.JWTVerifier;
+import com.auth0.jwt.RegisteredClaims;
+import com.auth0.jwt.algorithms.Algorithm;
+import com.auth0.jwt.exceptions.AlgorithmMismatchException;
+import com.auth0.jwt.exceptions.InvalidClaimException;
+import com.auth0.jwt.exceptions.JWTDecodeException;
+import com.auth0.jwt.exceptions.JWTVerificationException;
+import com.auth0.jwt.exceptions.SignatureVerificationException;
+import com.auth0.jwt.exceptions.TokenExpiredException;
+import com.auth0.jwt.interfaces.Claim;
+import com.auth0.jwt.interfaces.DecodedJWT;
+import com.auth0.jwt.interfaces.Verification;
+import io.kubernetes.client.openapi.ApiClient;
+import io.kubernetes.client.util.Config;
+import io.netty.handler.ssl.SslContext;
+import io.netty.handler.ssl.SslContextBuilder;
+import java.io.File;
+import java.io.IOException;
+import java.net.SocketAddress;
+import java.security.PublicKey;
+import java.security.interfaces.ECPublicKey;
+import java.security.interfaces.RSAPublicKey;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import javax.naming.AuthenticationException;
+import javax.net.ssl.SSLSession;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.apache.pulsar.broker.authentication.AuthenticationDataSource;
+import org.apache.pulsar.broker.authentication.AuthenticationProvider;
+import org.apache.pulsar.broker.authentication.AuthenticationProviderToken;
+import org.apache.pulsar.broker.authentication.AuthenticationState;
+import org.apache.pulsar.broker.authentication.metrics.AuthenticationMetrics;
+import org.apache.pulsar.common.api.AuthData;
+import org.asynchttpclient.AsyncHttpClient;
+import org.asynchttpclient.AsyncHttpClientConfig;
+import org.asynchttpclient.DefaultAsyncHttpClient;
+import org.asynchttpclient.DefaultAsyncHttpClientConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An {@link AuthenticationProvider} implementation that supports the usage of a JSON Web Token (JWT)
+ * for client authentication. This implementation retrieves the PublicKey from the JWT issuer (assuming the
+ * issuer is in the configured allowed list) and then uses that Public Key to verify the validity of the JWT's
+ * signature.
+ *
+ * The Public Keys for a given provider are cached based on certain configured parameters to improve performance.
+ * The tradeoff here is that the longer Public Keys are cached, the longer an invalidated token could be used. One way
+ * to ensure caches are cleared is to restart all brokers.
+ *
+ * Class is called from multiple threads. The implementation must be thread safe. This class expects to be loaded once
+ * and then called concurrently for each new connection. The cache is backed by a GuavaCachedJwkProvider, which is
+ * thread-safe.
+ *
+ * Supported algorithms are: RS256, RS384, RS512, ES256, ES384, ES512 where the naming conventions follow
+ * this RFC: https://datatracker.ietf.org/doc/html/rfc7518#section-3.1.
+ */
+public class AuthenticationProviderOpenID implements AuthenticationProvider {
+ private static final Logger log = LoggerFactory.getLogger(AuthenticationProviderOpenID.class);
+
+ private static final String SIMPLE_NAME = AuthenticationProviderOpenID.class.getSimpleName();
+
+ // Must match the value used by the OAuth2 Client Plugin.
+ private static final String AUTH_METHOD_NAME = "token";
+
+ // This is backed by an ObjectMapper, which is thread safe. It is an optimization
+ // to share this for decoding JWTs for all connections to this broker.
+ private final JWT jwtLibrary = new JWT();
+
+ private Set issuers;
+
+ // This caches the map from Issuer URL to the jwks_uri served at the /.well-known/openid-configuration endpoint
+ private OpenIDProviderMetadataCache openIDProviderMetadataCache;
+
+ // A cache used to store the results of getting the JWKS from the jwks_uri for an issuer.
+ private JwksCache jwksCache;
+
+ private volatile AsyncHttpClient httpClient;
+
+ // A list of supported algorithms. This is the "alg" field on the JWT.
+ // Source for strings: https://datatracker.ietf.org/doc/html/rfc7518#section-3.1.
+ private static final String ALG_RS256 = "RS256";
+ private static final String ALG_RS384 = "RS384";
+ private static final String ALG_RS512 = "RS512";
+ private static final String ALG_ES256 = "ES256";
+ private static final String ALG_ES384 = "ES384";
+ private static final String ALG_ES512 = "ES512";
+
+ private long acceptedTimeLeewaySeconds;
+ private FallbackDiscoveryMode fallbackDiscoveryMode;
+ private String roleClaim = ROLE_CLAIM_DEFAULT;
+ private boolean isRoleClaimNotSubject;
+
+ static final String ALLOWED_TOKEN_ISSUERS = "openIDAllowedTokenIssuers";
+ static final String ISSUER_TRUST_CERTS_FILE_PATH = "openIDTokenIssuerTrustCertsFilePath";
+ static final String FALLBACK_DISCOVERY_MODE = "openIDFallbackDiscoveryMode";
+ static final String ALLOWED_AUDIENCES = "openIDAllowedAudiences";
+ static final String ROLE_CLAIM = "openIDRoleClaim";
+ static final String ROLE_CLAIM_DEFAULT = "sub";
+ static final String ACCEPTED_TIME_LEEWAY_SECONDS = "openIDAcceptedTimeLeewaySeconds";
+ static final int ACCEPTED_TIME_LEEWAY_SECONDS_DEFAULT = 0;
+ static final String CACHE_SIZE = "openIDCacheSize";
+ static final int CACHE_SIZE_DEFAULT = 5;
+ static final String CACHE_REFRESH_AFTER_WRITE_SECONDS = "openIDCacheRefreshAfterWriteSeconds";
+ static final int CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT = 18 * 60 * 60;
+ static final String CACHE_EXPIRATION_SECONDS = "openIDCacheExpirationSeconds";
+ static final int CACHE_EXPIRATION_SECONDS_DEFAULT = 24 * 60 * 60;
+ static final String KEY_ID_CACHE_MISS_REFRESH_SECONDS = "openIDKeyIdCacheMissRefreshSeconds";
+ static final int KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT = 5 * 60;
+ static final String HTTP_CONNECTION_TIMEOUT_MILLIS = "openIDHttpConnectionTimeoutMillis";
+ static final int HTTP_CONNECTION_TIMEOUT_MILLIS_DEFAULT = 10_000;
+ static final String HTTP_READ_TIMEOUT_MILLIS = "openIDHttpReadTimeoutMillis";
+ static final int HTTP_READ_TIMEOUT_MILLIS_DEFAULT = 10_000;
+ static final String REQUIRE_HTTPS = "openIDRequireIssuersUseHttps";
+ static final boolean REQUIRE_HTTPS_DEFAULT = true;
+
+ // The list of audiences that are allowed to connect to this broker. A valid JWT must contain one of the audiences.
+ private String[] allowedAudiences;
+
+ @Override
+ public void initialize(ServiceConfiguration config) throws IOException {
+ this.allowedAudiences = validateAllowedAudiences(getConfigValueAsSet(config, ALLOWED_AUDIENCES));
+ this.roleClaim = getConfigValueAsString(config, ROLE_CLAIM, ROLE_CLAIM_DEFAULT);
+ this.isRoleClaimNotSubject = !ROLE_CLAIM_DEFAULT.equals(roleClaim);
+ this.acceptedTimeLeewaySeconds = getConfigValueAsInt(config, ACCEPTED_TIME_LEEWAY_SECONDS,
+ ACCEPTED_TIME_LEEWAY_SECONDS_DEFAULT);
+ boolean requireHttps = getConfigValueAsBoolean(config, REQUIRE_HTTPS, REQUIRE_HTTPS_DEFAULT);
+ this.fallbackDiscoveryMode = FallbackDiscoveryMode.valueOf(getConfigValueAsString(config,
+ FALLBACK_DISCOVERY_MODE, FallbackDiscoveryMode.DISABLED.name()));
+ this.issuers = validateIssuers(getConfigValueAsSet(config, ALLOWED_TOKEN_ISSUERS), requireHttps,
+ fallbackDiscoveryMode != FallbackDiscoveryMode.DISABLED);
+
+ int connectionTimeout = getConfigValueAsInt(config, HTTP_CONNECTION_TIMEOUT_MILLIS,
+ HTTP_CONNECTION_TIMEOUT_MILLIS_DEFAULT);
+ int readTimeout = getConfigValueAsInt(config, HTTP_READ_TIMEOUT_MILLIS, HTTP_READ_TIMEOUT_MILLIS_DEFAULT);
+ String trustCertsFilePath = getConfigValueAsString(config, ISSUER_TRUST_CERTS_FILE_PATH, null);
+ SslContext sslContext = null;
+ if (trustCertsFilePath != null) {
+ // Use default settings for everything but the trust store.
+ sslContext = SslContextBuilder.forClient()
+ .trustManager(new File(trustCertsFilePath))
+ .build();
+ }
+ AsyncHttpClientConfig clientConfig = new DefaultAsyncHttpClientConfig.Builder()
+ .setConnectTimeout(connectionTimeout)
+ .setReadTimeout(readTimeout)
+ .setSslContext(sslContext)
+ .build();
+ httpClient = new DefaultAsyncHttpClient(clientConfig);
+ ApiClient k8sApiClient =
+ fallbackDiscoveryMode != FallbackDiscoveryMode.DISABLED ? Config.defaultClient() : null;
+ this.openIDProviderMetadataCache = new OpenIDProviderMetadataCache(config, httpClient, k8sApiClient);
+ this.jwksCache = new JwksCache(config, httpClient, k8sApiClient);
+ }
+
+ @Override
+ public String getAuthMethodName() {
+ return AUTH_METHOD_NAME;
+ }
+
+ /**
+ * Authenticate the parameterized {@link AuthenticationDataSource} by verifying the issuer is an allowed issuer,
+ * then retrieving the JWKS URI from the issuer, then retrieving the Public key from the JWKS URI, and finally
+ * verifying the JWT signature and claims.
+ *
+ * @param authData - the authData passed by the Pulsar Broker containing the token.
+ * @return the role, if the JWT is authenticated, otherwise a failed future.
+ */
+ @Override
+ public CompletableFuture authenticateAsync(AuthenticationDataSource authData) {
+ return authenticateTokenAsync(authData).thenApply(this::getRole);
+ }
+
+ /**
+ * Authenticate the parameterized {@link AuthenticationDataSource} and return the decoded JWT.
+ * @param authData - the authData containing the token.
+ * @return a completed future with the decoded JWT, if the JWT is authenticated. Otherwise, a failed future.
+ */
+ CompletableFuture authenticateTokenAsync(AuthenticationDataSource authData) {
+ String token;
+ try {
+ token = AuthenticationProviderToken.getToken(authData);
+ } catch (AuthenticationException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ return CompletableFuture.failedFuture(e);
+ }
+ return authenticateToken(token)
+ .whenComplete((jwt, e) -> {
+ if (jwt != null) {
+ AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
+ }
+ // Failure metrics are incremented within methods above
+ });
+ }
+
+ /**
+ * Get the role from a JWT at the configured role claim field.
+ * NOTE: does not do any verification of the JWT
+ * @param jwt - token to get the role from
+ * @return the role, or null, if it is not set on the JWT
+ */
+ String getRole(DecodedJWT jwt) {
+ try {
+ Claim roleClaim = jwt.getClaim(this.roleClaim);
+ if (roleClaim.isNull()) {
+ // The claim was not present in the JWT
+ return null;
+ }
+
+ String role = roleClaim.asString();
+ if (role != null) {
+ // The role is non null only if the JSON node is a text field
+ return role;
+ }
+
+ // Reuse the already-retrieved Claim rather than calling jwt.getClaim a second time.
+ List<String> roles = roleClaim.asList(String.class);
+ if (roles == null || roles.isEmpty()) {
+ return null;
+ }
+ if (roles.size() > 1) {
+ log.debug("JWT for subject [{}] has multiple roles; using the first one.", jwt.getSubject());
+ }
+ return roles.get(0);
+ } catch (JWTDecodeException e) {
+ log.error("Exception while retrieving role from JWT", e);
+ return null;
+ }
+ }
+
+ /**
+ * Convert a JWT string into a {@link DecodedJWT}
+ * The benefit of using this method is that it utilizes the already instantiated {@link JWT} parser.
+ * WARNING: this method does not verify the authenticity of the token. It only decodes it.
+ *
+ * @param token - string JWT to be decoded
+ * @return a decoded JWT
+ * @throws AuthenticationException if the token string is null or if any part of the token contains
+ * an invalid jwt or JSON format of each of the jwt parts.
+ */
+ DecodedJWT decodeJWT(String token) throws AuthenticationException {
+ if (token == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ throw new AuthenticationException("Invalid token: cannot be null");
+ }
+ try {
+ return jwtLibrary.decodeJwt(token);
+ } catch (JWTDecodeException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ // Chain the underlying decode failure so the root cause appears in stack traces;
+ // javax.naming.AuthenticationException has no (message, cause) constructor, so use initCause.
+ AuthenticationException ae = new AuthenticationException("Unable to decode JWT: " + e.getMessage());
+ ae.initCause(e);
+ throw ae;
+ }
+ }
+
+ /**
+ * Authenticate the parameterized JWT.
+ *
+ * @param token - a nonnull JWT to authenticate
+ * @return a fully authenticated JWT, or AuthenticationException if the JWT is proven to be invalid in any way
+ */
+ private CompletableFuture<DecodedJWT> authenticateToken(String token) {
+ if (token == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ return CompletableFuture.failedFuture(new AuthenticationException("JWT cannot be null"));
+ }
+ final DecodedJWT jwt;
+ try {
+ jwt = decodeJWT(token);
+ } catch (AuthenticationException e) {
+ // decodeJWT already incremented the ERROR_DECODING_JWT failure metric; incrementing it
+ // here as well would double count a single failed authentication attempt.
+ return CompletableFuture.failedFuture(e);
+ }
+ return verifyIssuerAndGetJwk(jwt)
+ .thenCompose(jwk -> {
+ try {
+ if (!jwt.getAlgorithm().equals(jwk.getAlgorithm())) {
+ incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
+ return CompletableFuture.failedFuture(
+ new AuthenticationException("JWK's alg [" + jwk.getAlgorithm()
+ + "] does not match JWT's alg [" + jwt.getAlgorithm() + "]"));
+ }
+ // Verify the JWT signature
+ // Throws exception if any verification check fails
+ return CompletableFuture
+ .completedFuture(verifyJWT(jwk.getPublicKey(), jwt.getAlgorithm(), jwt));
+ } catch (InvalidPublicKeyException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.INVALID_PUBLIC_KEY);
+ return CompletableFuture.failedFuture(
+ new AuthenticationException("Invalid public key: " + e.getMessage()));
+ } catch (AuthenticationException e) {
+ return CompletableFuture.failedFuture(e);
+ }
+ });
+ }
+
+ /**
+ * Verify the JWT's issuer (iss) claim is one of the allowed issuers and then retrieve the JWK from the issuer. If
+ * not, see {@link FallbackDiscoveryMode} for the fallback behavior.
+ * @param jwt - the token to use to discover the issuer's JWKS URI, which is then used to retrieve the issuer's
+ * current public keys.
+ * @return a JWK that can be used to verify the JWT's signature
+ */
+ private CompletableFuture<Jwk> verifyIssuerAndGetJwk(DecodedJWT jwt) {
+ if (jwt.getIssuer() == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER);
+ return CompletableFuture.failedFuture(new AuthenticationException("Issuer cannot be null"));
+ } else if (this.issuers.contains(jwt.getIssuer())) {
+ // Retrieve the metadata: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+ return openIDProviderMetadataCache.getOpenIDProviderMetadataForIssuer(jwt.getIssuer())
+ .thenCompose(metadata -> jwksCache.getJwk(metadata.getJwksUri(), jwt.getKeyId()));
+ } else if (fallbackDiscoveryMode == FallbackDiscoveryMode.KUBERNETES_DISCOVER_TRUSTED_ISSUER) {
+ // Trust the issuer discovered via the k8s api server, then follow standard OIDC discovery for it.
+ return openIDProviderMetadataCache.getOpenIDProviderMetadataForKubernetesApiServer(jwt.getIssuer())
+ .thenCompose(metadata ->
+ openIDProviderMetadataCache.getOpenIDProviderMetadataForIssuer(metadata.getIssuer()))
+ .thenCompose(metadata -> jwksCache.getJwk(metadata.getJwksUri(), jwt.getKeyId()));
+ } else if (fallbackDiscoveryMode == FallbackDiscoveryMode.KUBERNETES_DISCOVER_PUBLIC_KEYS) {
+ // Verify the issuer via the k8s api server, then fetch the public keys from the api server itself.
+ return openIDProviderMetadataCache.getOpenIDProviderMetadataForKubernetesApiServer(jwt.getIssuer())
+ .thenCompose(__ -> jwksCache.getJwkFromKubernetesApiServer(jwt.getKeyId()));
+ } else {
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER);
+ return CompletableFuture
+ .failedFuture(new AuthenticationException("Issuer not allowed: " + jwt.getIssuer()));
+ }
+ }
+
+ // Creates the per-connection authentication state object. Authentication itself happens
+ // later, when AuthenticationStateOpenID#authenticateAsync is invoked with the client token.
+ @Override
+ public AuthenticationState newAuthState(AuthData authData, SocketAddress remoteAddress, SSLSession sslSession)
+ throws AuthenticationException {
+ return new AuthenticationStateOpenID(this, remoteAddress, sslSession);
+ }
+
+ @Override
+ public void close() throws IOException {
+ // Release the shared HTTP client used for provider metadata and JWKS fetches.
+ // NOTE(review): k8sApiClient is not closed here — confirm whether it holds resources
+ // (connection pools, threads) that require explicit cleanup.
+ httpClient.close();
+ }
+
+ /**
+ * Verify the parameterized JWT against the public key using the named algorithm, and validate the
+ * required claims (iat, exp, nbf, sub, audience, and optionally the role claim).
+ *
+ * @param publicKey - the public key to use when configuring the verifier
+ * @param publicKeyAlg - the algorithm for the parameterized public key
+ * @param jwt - jwt to be verified and returned (only if verified)
+ * @return the verified JWT
+ * @throws AuthenticationException if the Public Key's algorithm is not supported, if the algorithm param does not
+ * match the Public Key's actual algorithm, or if any verification check on the JWT fails.
+ */
+ DecodedJWT verifyJWT(PublicKey publicKey,
+ String publicKeyAlg,
+ DecodedJWT jwt) throws AuthenticationException {
+ if (publicKeyAlg == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
+ throw new AuthenticationException("PublicKey algorithm cannot be null");
+ }
+
+ Algorithm alg;
+ try {
+ switch (publicKeyAlg) {
+ case ALG_RS256:
+ alg = Algorithm.RSA256((RSAPublicKey) publicKey, null);
+ break;
+ case ALG_RS384:
+ alg = Algorithm.RSA384((RSAPublicKey) publicKey, null);
+ break;
+ case ALG_RS512:
+ alg = Algorithm.RSA512((RSAPublicKey) publicKey, null);
+ break;
+ case ALG_ES256:
+ alg = Algorithm.ECDSA256((ECPublicKey) publicKey, null);
+ break;
+ case ALG_ES384:
+ alg = Algorithm.ECDSA384((ECPublicKey) publicKey, null);
+ break;
+ case ALG_ES512:
+ alg = Algorithm.ECDSA512((ECPublicKey) publicKey, null);
+ break;
+ default:
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
+ throw new AuthenticationException("Unsupported algorithm: " + publicKeyAlg);
+ }
+ } catch (ClassCastException e) {
+ // The named algorithm family (RSA vs EC) did not match the runtime type of the key.
+ incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
+ throw new AuthenticationException("Expected PublicKey alg [" + publicKeyAlg
+ + "] does not match actual alg.");
+ }
+
+ // We verify issuer when retrieving the PublicKey, so it is not verified here.
+ // The claim presence requirements are based on https://openid.net/specs/openid-connect-basic-1_0.html#IDToken
+ Verification verifierBuilder = JWT.require(alg)
+ .acceptLeeway(acceptedTimeLeewaySeconds)
+ .withAnyOfAudience(allowedAudiences)
+ .withClaimPresence(RegisteredClaims.ISSUED_AT)
+ .withClaimPresence(RegisteredClaims.EXPIRES_AT)
+ .withClaimPresence(RegisteredClaims.NOT_BEFORE)
+ .withClaimPresence(RegisteredClaims.SUBJECT);
+
+ if (isRoleClaimNotSubject) {
+ // Only require the role claim when it is a custom claim; "sub" presence is already required above.
+ verifierBuilder = verifierBuilder.withClaimPresence(roleClaim);
+ }
+
+ JWTVerifier verifier = verifierBuilder.build();
+
+ try {
+ return verifier.verify(jwt);
+ } catch (TokenExpiredException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.EXPIRED_JWT);
+ throw new AuthenticationException("JWT expired: " + e.getMessage());
+ } catch (SignatureVerificationException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT_SIGNATURE);
+ throw new AuthenticationException("JWT signature verification exception: " + e.getMessage());
+ } catch (InvalidClaimException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.INVALID_JWT_CLAIM);
+ throw new AuthenticationException("JWT contains invalid claim: " + e.getMessage());
+ } catch (AlgorithmMismatchException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
+ throw new AuthenticationException("JWT algorithm does not match Public Key algorithm: " + e.getMessage());
+ } catch (JWTDecodeException e) {
+ // JWTDecodeException extends JWTVerificationException, so this catch must precede the next one.
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ throw new AuthenticationException("Error while decoding JWT: " + e.getMessage());
+ } catch (JWTVerificationException | IllegalArgumentException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT);
+ throw new AuthenticationException("JWT verification failed: " + e.getMessage());
+ }
+ }
+
+ // Record a failed authentication attempt for this provider under the given error code.
+ static void incrementFailureMetric(AuthenticationExceptionCode code) {
+ AuthenticationMetrics.authenticateFailure(SIMPLE_NAME, AUTH_METHOD_NAME, code);
+ }
+
+ /**
+ * Validate the configured allow list of allowedIssuers. The allowedIssuers set must be nonempty in order for
+ * the plugin to authenticate any token. Thus, it fails initialization if the configuration is
+ * missing. Each issuer URL should use the HTTPS scheme. The plugin fails initialization if any
+ * issuer url is insecure, unless requireHttps is false.
+ * @param allowedIssuers - issuers to validate
+ * @param requireHttps - whether to require https for issuers.
+ * @param allowEmptyIssuers - whether to allow empty issuers. This setting only makes sense when kubernetes is used
+ * as a fallback issuer.
+ * @return the validated issuers
+ * @throws IllegalArgumentException if the allowedIssuers is empty, or contains insecure issuers when required
+ */
+ private Set<String> validateIssuers(Set<String> allowedIssuers, boolean requireHttps, boolean allowEmptyIssuers) {
+ if (allowedIssuers == null || (allowedIssuers.isEmpty() && !allowEmptyIssuers)) {
+ throw new IllegalArgumentException("Missing configured value for: " + ALLOWED_TOKEN_ISSUERS);
+ }
+ for (String issuer : allowedIssuers) {
+ if (!issuer.toLowerCase().startsWith("https://")) {
+ // Warn even when http is allowed so operators can see the insecure configuration.
+ log.warn("Allowed issuer is not using https scheme: {}", issuer);
+ if (requireHttps) {
+ throw new IllegalArgumentException("Issuer URL does not use https, but must: " + issuer);
+ }
+ }
+ }
+ return allowedIssuers;
+ }
+
+ /**
+ * Validate the configured allow list of allowedAudiences. The allowedAudiences must be set because
+ * JWT must have an audience claim.
+ * See https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation.
+ * @param allowedAudiences - the configured audiences to validate
+ * @return the validated audiences as an array (the form required by the JWT verifier builder)
+ * @throws IllegalArgumentException if the allowedAudiences set is null or empty
+ */
+ String[] validateAllowedAudiences(Set<String> allowedAudiences) {
+ if (allowedAudiences == null || allowedAudiences.isEmpty()) {
+ throw new IllegalArgumentException("Missing configured value for: " + ALLOWED_AUDIENCES);
+ }
+ return allowedAudiences.toArray(new String[0]);
+ }
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationStateOpenID.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationStateOpenID.java
new file mode 100644
index 0000000000000..3046a6dd0e3b4
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationStateOpenID.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import java.net.SocketAddress;
+import java.util.concurrent.CompletableFuture;
+import javax.naming.AuthenticationException;
+import javax.net.ssl.SSLSession;
+import org.apache.pulsar.broker.authentication.AuthenticationDataCommand;
+import org.apache.pulsar.broker.authentication.AuthenticationDataSource;
+import org.apache.pulsar.broker.authentication.AuthenticationState;
+import org.apache.pulsar.common.api.AuthData;
+
+/**
+ * Class representing the authentication state of a single connection.
+ * Authentication is single-stage: one call to {@link #authenticateAsync(AuthData)} either
+ * completes authentication (setting the role and expiration) or fails the returned future.
+ */
+class AuthenticationStateOpenID implements AuthenticationState {
+ private final AuthenticationProviderOpenID provider;
+ private AuthenticationDataSource authenticationDataSource;
+ // Volatile because it is written by the auth future's completion thread and read by others.
+ private volatile String role;
+ private final SocketAddress remoteAddress;
+ private final SSLSession sslSession;
+ // Epoch millis of the JWT's exp claim; 0 until authentication completes.
+ private volatile long expiration;
+
+ AuthenticationStateOpenID(
+ AuthenticationProviderOpenID provider,
+ SocketAddress remoteAddress,
+ SSLSession sslSession) {
+ this.provider = provider;
+ this.remoteAddress = remoteAddress;
+ this.sslSession = sslSession;
+ }
+
+ @Override
+ public String getAuthRole() throws AuthenticationException {
+ if (role == null) {
+ throw new AuthenticationException("Authentication has not completed");
+ }
+ return role;
+ }
+
+ @Deprecated
+ @Override
+ public AuthData authenticate(AuthData authData) throws AuthenticationException {
+ // This method is not expected to be called and is subject to removal.
+ throw new AuthenticationException("Not supported");
+ }
+
+ @Override
+ public CompletableFuture<AuthData> authenticateAsync(AuthData authData) {
+ final String token = new String(authData.getBytes(), UTF_8);
+ this.authenticationDataSource = new AuthenticationDataCommand(token, remoteAddress, sslSession);
+ return provider
+ .authenticateTokenAsync(authenticationDataSource)
+ .thenApply(jwt -> {
+ this.role = provider.getRole(jwt);
+ // OIDC requires setting the exp claim, so this should never be null.
+ // We verify it is not null during token validation.
+ this.expiration = jwt.getExpiresAt().getTime();
+ // Single stage authentication, so return null here
+ return null;
+ });
+ }
+
+ @Override
+ public AuthenticationDataSource getAuthDataSource() {
+ return authenticationDataSource;
+ }
+
+ @Override
+ public boolean isComplete() {
+ return role != null;
+ }
+
+ @Override
+ public boolean isExpired() {
+ // NOTE(review): before authentication completes, expiration is 0 and this reports
+ // expired — confirm callers only consult this after isComplete() returns true.
+ return System.currentTimeMillis() > expiration;
+ }
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/ConfigUtils.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/ConfigUtils.java
new file mode 100644
index 0000000000000..f62bf9c818653
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/ConfigUtils.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class ConfigUtils {
+ private static final Logger log = LoggerFactory.getLogger(ConfigUtils.class);
+
+ /**
+ * Get configured property as a string. If not configured, return null.
+ * @param conf - the configuration map
+ * @param configProp - the property to get
+ * @return a string from the conf or null, if the configuration property was not set
+ */
+ static String getConfigValueAsString(ServiceConfiguration conf,
+ String configProp) throws IllegalArgumentException {
+ String value = getConfigValueAsStringImpl(conf, configProp);
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return value;
+ }
+
+ /**
+ * Get configured property as a string. If not configured, return null.
+ * @param conf - the configuration map
+ * @param configProp - the property to get
+ * @param defaultValue - the value to use if the configuration value is not set
+ * @return a string from the conf or the default value
+ */
+ static String getConfigValueAsString(ServiceConfiguration conf, String configProp,
+ String defaultValue) throws IllegalArgumentException {
+ String value = getConfigValueAsStringImpl(conf, configProp);
+ if (value == null) {
+ value = defaultValue;
+ }
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return value;
+ }
+
+ /**
+ * Get configured property as a set. Split using a comma delimiter and remove any extra whitespace surrounding
+ * the commas. If not configured, return the empty set.
+ *
+ * @param conf - the map of configuration properties
+ * @param configProp - the property (key) to get
+ * @return a set of strings from the conf
+ */
+ static Set getConfigValueAsSet(ServiceConfiguration conf, String configProp) {
+ String value = getConfigValueAsStringImpl(conf, configProp);
+ if (StringUtils.isBlank(value)) {
+ log.info("Configuration for [{}] is the empty set.", configProp);
+ return Collections.emptySet();
+ }
+ Set set = Arrays.stream(value.trim().split("\\s*,\\s*")).collect(Collectors.toSet());
+ log.info("Configuration for [{}] is [{}].", configProp, String.join(", ", set));
+ return set;
+ }
+
+ private static String getConfigValueAsStringImpl(ServiceConfiguration conf,
+ String configProp) throws IllegalArgumentException {
+ Object value = conf.getProperty(configProp);
+ if (value instanceof String) {
+ return (String) value;
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Utility method to get an integer from the {@link ServiceConfiguration}. If the value is not a valid long or the
+ * key is not present in the conf, the default value will be used.
+ *
+ * @param conf - the map of configuration properties
+ * @param configProp - the property (key) to get
+ * @param defaultValue - the value to use if the property is missing from the conf
+ * @return a long
+ */
+ static int getConfigValueAsInt(ServiceConfiguration conf, String configProp, int defaultValue) {
+ Object value = conf.getProperty(configProp);
+ if (value instanceof Integer) {
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return (Integer) value;
+ } else if (value instanceof String) {
+ try {
+ return Integer.parseInt((String) value);
+ } catch (NumberFormatException numberFormatException) {
+ log.error("Expected configuration for [{}] to be a long, but got [{}]. Using default value: [{}]",
+ configProp, value, defaultValue, numberFormatException);
+ return defaultValue;
+ }
+ } else {
+ log.info("Configuration for [{}] is using the default value: [{}]", configProp, defaultValue);
+ return defaultValue;
+ }
+ }
+
+ /**
+ * Utility method to get a boolean from the {@link ServiceConfiguration}. If the key is present in the conf,
+ * return the default value. If key is present the value is not a valid boolean, the result will be false.
+ *
+ * @param conf - the map of configuration properties
+ * @param configProp - the property (key) to get
+ * @param defaultValue - the value to use if the property is missing from the conf
+ * @return a boolean
+ */
+ static boolean getConfigValueAsBoolean(ServiceConfiguration conf, String configProp, boolean defaultValue) {
+ Object value = conf.getProperty(configProp);
+ if (value instanceof Boolean) {
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return (boolean) value;
+ } else if (value instanceof String) {
+ boolean result = Boolean.parseBoolean((String) value);
+ log.info("Configuration for [{}] is [{}]", configProp, result);
+ return result;
+ } else {
+ log.info("Configuration for [{}] is using the default value: [{}]", configProp, defaultValue);
+ return defaultValue;
+ }
+ }
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/FallbackDiscoveryMode.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/FallbackDiscoveryMode.java
new file mode 100644
index 0000000000000..5bf0c1b23fce6
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/FallbackDiscoveryMode.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import org.apache.pulsar.common.classification.InterfaceStability;
+
+/**
+ * These are the modes available for configuring how the Open ID Connect Authentication Provider should handle a JWT
+ * that has an issuer that is not explicitly in the allowed issuers set configured by
+ * {@link AuthenticationProviderOpenID#ALLOWED_TOKEN_ISSUERS}. The current implementations rely on using the Kubernetes
+ * Api Server's Open ID Connect features to discover an additional issuer or additional public keys to trust. See the
+ * Kubernetes documentation for more information on how Service Accounts can integrate with Open ID Connect.
+ * https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery
+ */
+@InterfaceStability.Evolving
+public enum FallbackDiscoveryMode {
+ /**
+ * There will be no discovery of additional trusted issuers or public keys. This setting requires that operators
+ * explicitly allow all issuers that will be trusted. For the Kubernetes Service Account Token Projections to work,
+ * the operator must explicitly trust the issuer on the token's "iss" claim. This is the default setting because it
+ * is the only mode that explicitly follows the OIDC spec for verification of discovered provider configuration.
+ */
+ DISABLED,
+
+ /**
+ * The Kubernetes Api Server will be used to discover an additional trusted issuer by getting the issuer at the
+ * Api Server's /.well-known/openid-configuration endpoint, verifying that issuer matches the "iss" claim on the
+ * supplied token, then treating that issuer as a trusted issuer by discovering the jwks_uri via that issuer's
+ * /.well-known/openid-configuration endpoint. This mode can be helpful in EKS environments where the Api Server's
+ * public keys served at the /openid/v1/jwks endpoint are not the same as the public keys served at the issuer's
+ * jwks_uri. It fails to be OIDC compliant because the URL used to discover the provider configuration is not the
+ * same as the issuer claim on the token.
+ */
+ KUBERNETES_DISCOVER_TRUSTED_ISSUER,
+
+ /**
+ * The Kubernetes Api Server will be used to discover an additional set of valid public keys by getting the issuer
+ * at the Api Server's /.well-known/openid-configuration endpoint, verifying that issuer matches the "iss" claim on
+ * the supplied token, then calling the Api Server endpoint to get the public keys using a kubernetes client. This
+ * mode is currently useful for getting the public keys from the Api Server because the Api Server requires custom
+ * TLS and authentication, and the kubernetes client automatically handles those. It fails to be OIDC compliant
+ * because the URL used to discover the provider configuration is not the same as the issuer claim on the token.
+ */
+ KUBERNETES_DISCOVER_PUBLIC_KEYS,
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java
new file mode 100644
index 0000000000000..73934e9c1e05e
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_EXPIRATION_SECONDS;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_EXPIRATION_SECONDS_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_REFRESH_AFTER_WRITE_SECONDS;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_SIZE;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_SIZE_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.incrementFailureMetric;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsInt;
+import com.auth0.jwk.Jwk;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.github.benmanes.caffeine.cache.AsyncCacheLoader;
+import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import io.kubernetes.client.openapi.ApiCallback;
+import io.kubernetes.client.openapi.ApiClient;
+import io.kubernetes.client.openapi.ApiException;
+import io.kubernetes.client.openapi.apis.OpenidApi;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import javax.naming.AuthenticationException;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.asynchttpclient.AsyncHttpClient;
+
+public class JwksCache {
+
+    // Cache from a JWKS source to its parsed JWKS. The key is the issuer's JWKS URI; when the Optional key is
+    // empty, the JWKS is retrieved from the Kubernetes Api Server via the fallback kubernetes client (see loader).
+    private final AsyncLoadingCache<Optional<String>, List<Jwk>> cache;
+    // Nano-time of the last attempted refresh per cache key; used to rate limit refreshes triggered by key ID misses.
+    private final ConcurrentHashMap<Optional<String>, Long> jwksLastRefreshTime = new ConcurrentHashMap<>();
+    private final long keyIdCacheMissRefreshNanos;
+    private final ObjectReader reader = new ObjectMapper().readerFor(HashMap.class);
+    private final AsyncHttpClient httpClient;
+    // Non-null only when the Kubernetes fallback is enabled (a non-null ApiClient was supplied).
+    private final OpenidApi openidApi;
+
+    /**
+     * Create a JWKS cache backed by a Caffeine async loading cache.
+     *
+     * @param config - broker configuration supplying cache size, refresh, and expiration settings
+     * @param httpClient - client used to fetch a JWKS from an issuer's jwks_uri
+     * @param apiClient - kubernetes client; when non-null, enables the Api Server JWKS fallback
+     * @throws IOException - declared for initialization failures; NOTE(review): not thrown by the visible body
+     */
+    JwksCache(ServiceConfiguration config, AsyncHttpClient httpClient, ApiClient apiClient) throws IOException {
+        // Store the clients
+        this.httpClient = httpClient;
+        this.openidApi = apiClient != null ? new OpenidApi(apiClient) : null;
+        keyIdCacheMissRefreshNanos = TimeUnit.SECONDS.toNanos(getConfigValueAsInt(config,
+                KEY_ID_CACHE_MISS_REFRESH_SECONDS, KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT));
+        // Configure the cache
+        int maxSize = getConfigValueAsInt(config, CACHE_SIZE, CACHE_SIZE_DEFAULT);
+        int refreshAfterWriteSeconds = getConfigValueAsInt(config, CACHE_REFRESH_AFTER_WRITE_SECONDS,
+                CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT);
+        int expireAfterSeconds = getConfigValueAsInt(config, CACHE_EXPIRATION_SECONDS,
+                CACHE_EXPIRATION_SECONDS_DEFAULT);
+        AsyncCacheLoader<Optional<String>, List<Jwk>> loader = (jwksUri, executor) -> {
+            // Store the time of the retrieval, even though it might be a little early or the call might fail.
+            jwksLastRefreshTime.put(jwksUri, System.nanoTime());
+            if (jwksUri.isPresent()) {
+                return getJwksFromJwksUri(jwksUri.get());
+            } else {
+                // Empty key: fall back to the Kubernetes Api Server's JWKS endpoint.
+                return getJwksFromKubernetesApiServer();
+            }
+        };
+        this.cache = Caffeine.newBuilder()
+                .maximumSize(maxSize)
+                .refreshAfterWrite(refreshAfterWriteSeconds, TimeUnit.SECONDS)
+                .expireAfterWrite(expireAfterSeconds, TimeUnit.SECONDS)
+                .buildAsync(loader);
+    }
+
+    /**
+     * Retrieve the JWK identified by {@code keyId} from the JWKS hosted at {@code jwksUri}.
+     *
+     * @param jwksUri - the issuer's JWKS URI; must not be null
+     * @param keyId - the "kid" of the key to look up
+     * @return a future completing with the matching {@link Jwk}, or failing if the URI is null or the key is missing
+     */
+    CompletableFuture<Jwk> getJwk(String jwksUri, String keyId) {
+        if (jwksUri == null) {
+            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+            return CompletableFuture.failedFuture(new IllegalArgumentException("jwksUri must not be null."));
+        }
+        return getJwkAndMaybeReload(Optional.of(jwksUri), keyId, false);
+    }
+
+    /**
+     * Retrieve the JWK for the given key ID from the cached JWKS for {@code maybeJwksUri}. If the key ID is not
+     * found and {@code failOnMissingKeyId} is false, the cached JWKS may be invalidated (rate limited by
+     * {@code keyIdCacheMissRefreshNanos}) and the lookup retried exactly once.
+     *
+     * @param maybeJwksUri - cache key; present = issuer's JWKS URI, empty = Kubernetes Api Server fallback
+     * @param keyId - the "kid" of the key to look up
+     * @param failOnMissingKeyId - when true, a missing key ID fails immediately instead of triggering a retry
+     */
+    private CompletableFuture<Jwk> getJwkAndMaybeReload(Optional<String> maybeJwksUri,
+                                                        String keyId,
+                                                        boolean failOnMissingKeyId) {
+        return cache
+                .get(maybeJwksUri)
+                .thenCompose(jwks -> {
+                    try {
+                        return CompletableFuture.completedFuture(getJwkForKID(maybeJwksUri, jwks, keyId));
+                    } catch (IllegalArgumentException e) {
+                        if (failOnMissingKeyId) {
+                            throw e;
+                        } else {
+                            Long lastRefresh = jwksLastRefreshTime.get(maybeJwksUri);
+                            if (lastRefresh == null || System.nanoTime() - lastRefresh > keyIdCacheMissRefreshNanos) {
+                                // In this case, the key ID was not found, but we haven't refreshed the JWKS in a while,
+                                // so it is possible the key ID was added. Refresh the JWKS and try again.
+                                cache.synchronous().invalidate(maybeJwksUri);
+                            }
+                            // There is a small race condition where the JWKS could be refreshed by another thread,
+                            // so we retry getting the JWK, even though we might not have invalidated the cache.
+                            return getJwkAndMaybeReload(maybeJwksUri, keyId, true);
+                        }
+                    }
+                });
+    }
+
+    /**
+     * Asynchronously fetch and parse the JWKS hosted at {@code jwksUri} over HTTP. Parse or transport errors are
+     * mapped to {@link AuthenticationException} and counted in the failure metric.
+     */
+    private CompletableFuture<List<Jwk>> getJwksFromJwksUri(String jwksUri) {
+        return httpClient
+                .prepareGet(jwksUri)
+                .execute()
+                .toCompletableFuture()
+                .thenCompose(result -> {
+                    CompletableFuture<List<Jwk>> future = new CompletableFuture<>();
+                    try {
+                        HashMap<String, Object> jwks =
+                                reader.readValue(result.getResponseBodyAsBytes());
+                        future.complete(convertToJwks(jwksUri, jwks));
+                    } catch (AuthenticationException e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(e);
+                    } catch (Exception e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(new AuthenticationException(
+                                "Error retrieving public key at " + jwksUri + ": " + e.getMessage()));
+                    }
+                    return future;
+                });
+    }
+
+    /**
+     * Retrieve the JWK for {@code keyId} from the Kubernetes Api Server's JWKS. Fails fast when the Kubernetes
+     * fallback was not enabled at construction time (no {@code ApiClient} was supplied).
+     */
+    CompletableFuture<Jwk> getJwkFromKubernetesApiServer(String keyId) {
+        if (openidApi == null) {
+            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+            return CompletableFuture.failedFuture(new AuthenticationException(
+                    "Failed to retrieve public key from Kubernetes API server: Kubernetes fallback is not enabled."));
+        }
+        return getJwkAndMaybeReload(Optional.empty(), keyId, false);
+    }
+
+    /**
+     * Asynchronously fetch the JWKS from the Kubernetes Api Server using the kubernetes client, which handles the
+     * Api Server's custom TLS and authentication. All failure paths increment the failure metric and complete the
+     * returned future exceptionally with an {@link AuthenticationException}.
+     */
+    private CompletableFuture<List<Jwk>> getJwksFromKubernetesApiServer() {
+        CompletableFuture<List<Jwk>> future = new CompletableFuture<>();
+        try {
+            openidApi.getServiceAccountIssuerOpenIDKeysetAsync(new ApiCallback<String>() {
+                @Override
+                public void onFailure(ApiException e, int statusCode, Map<String, List<String>> responseHeaders) {
+                    incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                    // We want the message and responseBody here: https://github.com/kubernetes-client/java/issues/2066.
+                    future.completeExceptionally(
+                            new AuthenticationException("Failed to retrieve public key from Kubernetes API server. "
+                                    + "Message: " + e.getMessage() + " Response body: " + e.getResponseBody()));
+                }
+
+                @Override
+                public void onSuccess(String result, int statusCode, Map<String, List<String>> responseHeaders) {
+                    try {
+                        HashMap<String, Object> jwks = reader.readValue(result);
+                        future.complete(convertToJwks("Kubernetes API server", jwks));
+                    } catch (AuthenticationException e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(e);
+                    } catch (Exception e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(new AuthenticationException(
+                                "Error retrieving public key at Kubernetes API server: " + e.getMessage()));
+                    }
+                }
+
+                @Override
+                public void onUploadProgress(long bytesWritten, long contentLength, boolean done) {
+                    // Progress callbacks are intentionally no-ops; only the final result matters here.
+                }
+
+                @Override
+                public void onDownloadProgress(long bytesRead, long contentLength, boolean done) {
+                    // Intentionally a no-op.
+                }
+            });
+        } catch (ApiException e) {
+            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+            future.completeExceptionally(
+                    new AuthenticationException("Failed to retrieve public key from Kubernetes API server: "
+                            + e.getMessage()));
+        }
+        return future;
+    }
+
+    /**
+     * Find the JWK whose key ID ("kid") equals {@code keyId} via a linear scan of {@code jwks}.
+     *
+     * @param maybeJwksUri - the cache key the JWKS came from; NOTE(review): currently unused in this method
+     * @throws IllegalArgumentException if no JWK in {@code jwks} has the given key ID
+     */
+    private Jwk getJwkForKID(Optional<String> maybeJwksUri, List<Jwk> jwks, String keyId) {
+        for (Jwk jwk : jwks) {
+            if (jwk.getId().equals(keyId)) {
+                return jwk;
+            }
+        }
+        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+        throw new IllegalArgumentException("No JWK found for Key ID " + keyId);
+    }
+
+ /**
+     * The JWK Set is stored in the "keys" member of the JSON document; see
+     * https://www.rfc-editor.org/rfc/rfc7517#section-5.1.
+ *
+ * @param jwksUri - the URI used to retrieve the JWKS
+ * @param jwks - the JWKS to convert
+ * @return a list of {@link Jwk}
+ */
+ private List convertToJwks(String jwksUri, Map jwks) throws AuthenticationException {
+ try {
+ @SuppressWarnings("unchecked")
+ List